Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/Makefile               |    7
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c         |  383
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c             | 1160
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c             |  277
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h             |  245
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c             | 1916
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_debug.c       |   16
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c      |  171
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c       |   38
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c  |  200
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c         |   96
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c      |  202
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c      |   18
-rw-r--r--  drivers/gpu/drm/i915/i915_ioc32.c           |    5
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c             | 1732
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h             |  488
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c         |   18
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c           |  111
-rw-r--r--  drivers/gpu/drm/i915/i915_trace_points.c    |    2
-rw-r--r--  drivers/gpu/drm/i915/intel_acpi.c           |    3
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c           |   45
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c            |   76
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c            |  755
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c        | 4338
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c             |   46
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h            |  105
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c            |    6
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c             |    2
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c           |  310
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c            |  384
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c           |   11
-rw-r--r--  drivers/gpu/drm/i915/intel_modes.c          |    3
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c       |   71
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c        |  209
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c          |   29
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c             | 3796
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c     |  725
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h     |   23
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c           |  107
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c         |  102
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c             |   16
41 files changed, 11011 insertions, 7236 deletions
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index ce7fc77678b..2e9268da58d 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -11,17 +11,21 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
i915_gem_evict.o \
i915_gem_execbuffer.o \
i915_gem_gtt.o \
+ i915_gem_stolen.o \
i915_gem_tiling.o \
+ i915_sysfs.o \
i915_trace_points.o \
intel_display.o \
intel_crt.o \
intel_lvds.o \
intel_bios.o \
+ intel_ddi.o \
intel_dp.o \
intel_hdmi.o \
intel_sdvo.o \
intel_modes.o \
intel_panel.o \
+ intel_pm.o \
intel_i2c.o \
intel_fb.o \
intel_tv.o \
@@ -34,7 +38,8 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
dvo_ch7017.o \
dvo_ivch.o \
dvo_tfp410.o \
- dvo_sil164.o
+ dvo_sil164.o \
+ i915_gem_dmabuf.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e6162a1681f..eb2b3c25b9e 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -47,7 +47,6 @@ enum {
FLUSHING_LIST,
INACTIVE_LIST,
PINNED_LIST,
- DEFERRED_FREE_LIST,
};
static const char *yesno(int v)
@@ -178,18 +177,10 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
seq_printf(m, "Inactive:\n");
head = &dev_priv->mm.inactive_list;
break;
- case PINNED_LIST:
- seq_printf(m, "Pinned:\n");
- head = &dev_priv->mm.pinned_list;
- break;
case FLUSHING_LIST:
seq_printf(m, "Flushing:\n");
head = &dev_priv->mm.flushing_list;
break;
- case DEFERRED_FREE_LIST:
- seq_printf(m, "Deferred free:\n");
- head = &dev_priv->mm.deferred_free_list;
- break;
default:
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
@@ -252,21 +243,11 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
- count_objects(&dev_priv->mm.pinned_list, mm_list);
- seq_printf(m, " %u [%u] pinned objects, %zu [%zu] bytes\n",
- count, mappable_count, size, mappable_size);
-
- size = count = mappable_size = mappable_count = 0;
count_objects(&dev_priv->mm.inactive_list, mm_list);
seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
- count_objects(&dev_priv->mm.deferred_free_list, mm_list);
- seq_printf(m, " %u [%u] freed objects, %zu [%zu] bytes\n",
- count, mappable_count, size, mappable_size);
-
- size = count = mappable_size = mappable_count = 0;
list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
if (obj->fault_mappable) {
size += obj->gtt_space->size;
@@ -294,6 +275,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
+ uintptr_t list = (uintptr_t) node->info_ent->data;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
size_t total_obj_size, total_gtt_size;
@@ -305,6 +287,9 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
total_obj_size = total_gtt_size = count = 0;
list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ if (list == PINNED_LIST && obj->pin_count == 0)
+ continue;
+
seq_printf(m, " ");
describe_obj(m, obj);
seq_printf(m, "\n");
@@ -321,7 +306,6 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
return 0;
}
-
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -430,10 +414,6 @@ static void i915_ring_seqno_info(struct seq_file *m,
if (ring->get_seqno) {
seq_printf(m, "Current sequence (%s): %d\n",
ring->name, ring->get_seqno(ring));
- seq_printf(m, "Waiter sequence (%s): %d\n",
- ring->name, ring->waiting_seqno);
- seq_printf(m, "IRQ sequence (%s): %d\n",
- ring->name, ring->irq_seqno);
}
}
@@ -468,7 +448,45 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
if (ret)
return ret;
- if (!HAS_PCH_SPLIT(dev)) {
+ if (IS_VALLEYVIEW(dev)) {
+ seq_printf(m, "Display IER:\t%08x\n",
+ I915_READ(VLV_IER));
+ seq_printf(m, "Display IIR:\t%08x\n",
+ I915_READ(VLV_IIR));
+ seq_printf(m, "Display IIR_RW:\t%08x\n",
+ I915_READ(VLV_IIR_RW));
+ seq_printf(m, "Display IMR:\t%08x\n",
+ I915_READ(VLV_IMR));
+ for_each_pipe(pipe)
+ seq_printf(m, "Pipe %c stat:\t%08x\n",
+ pipe_name(pipe),
+ I915_READ(PIPESTAT(pipe)));
+
+ seq_printf(m, "Master IER:\t%08x\n",
+ I915_READ(VLV_MASTER_IER));
+
+ seq_printf(m, "Render IER:\t%08x\n",
+ I915_READ(GTIER));
+ seq_printf(m, "Render IIR:\t%08x\n",
+ I915_READ(GTIIR));
+ seq_printf(m, "Render IMR:\t%08x\n",
+ I915_READ(GTIMR));
+
+ seq_printf(m, "PM IER:\t\t%08x\n",
+ I915_READ(GEN6_PMIER));
+ seq_printf(m, "PM IIR:\t\t%08x\n",
+ I915_READ(GEN6_PMIIR));
+ seq_printf(m, "PM IMR:\t\t%08x\n",
+ I915_READ(GEN6_PMIMR));
+
+ seq_printf(m, "Port hotplug:\t%08x\n",
+ I915_READ(PORT_HOTPLUG_EN));
+ seq_printf(m, "DPFLIPSTAT:\t%08x\n",
+ I915_READ(VLV_DPFLIPSTAT));
+ seq_printf(m, "DPINVGTT:\t%08x\n",
+ I915_READ(DPINVGTT));
+
+ } else if (!HAS_PCH_SPLIT(dev)) {
seq_printf(m, "Interrupt enable: %08x\n",
I915_READ(IER));
seq_printf(m, "Interrupt identity: %08x\n",
@@ -564,69 +582,6 @@ static int i915_hws_info(struct seq_file *m, void *data)
return 0;
}
-static int i915_ringbuffer_data(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
- if (!ring->obj) {
- seq_printf(m, "No ringbuffer setup\n");
- } else {
- const u8 __iomem *virt = ring->virtual_start;
- uint32_t off;
-
- for (off = 0; off < ring->size; off += 4) {
- uint32_t *ptr = (uint32_t *)(virt + off);
- seq_printf(m, "%08x : %08x\n", off, *ptr);
- }
- }
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-static int i915_ringbuffer_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
- int ret;
-
- ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
- if (ring->size == 0)
- return 0;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- seq_printf(m, "Ring %s:\n", ring->name);
- seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
- seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
- seq_printf(m, " Size : %08x\n", ring->size);
- seq_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring));
- seq_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring));
- if (IS_GEN6(dev) || IS_GEN7(dev)) {
- seq_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring));
- seq_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring));
- }
- seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring));
- seq_printf(m, " Start : %08x\n", I915_READ_START(ring));
-
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
static const char *ring_str(int ring)
{
switch (ring) {
@@ -704,6 +659,7 @@ static void i915_ring_error_state(struct seq_file *m,
struct drm_i915_error_state *error,
unsigned ring)
{
+ BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
seq_printf(m, "%s command stream:\n", ring_str(ring));
seq_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
seq_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
@@ -718,8 +674,8 @@ static void i915_ring_error_state(struct seq_file *m,
if (INTEL_INFO(dev)->gen >= 4)
seq_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
+ seq_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
if (INTEL_INFO(dev)->gen >= 6) {
- seq_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
seq_printf(m, " SYNC_0: 0x%08x\n",
error->semaphore_mboxes[ring][0]);
@@ -727,31 +683,35 @@ static void i915_ring_error_state(struct seq_file *m,
error->semaphore_mboxes[ring][1]);
}
seq_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
+ seq_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
seq_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
seq_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
}
+struct i915_error_state_file_priv {
+ struct drm_device *dev;
+ struct drm_i915_error_state *error;
+};
+
static int i915_error_state(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
+ struct i915_error_state_file_priv *error_priv = m->private;
+ struct drm_device *dev = error_priv->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_error_state *error;
- unsigned long flags;
+ struct drm_i915_error_state *error = error_priv->error;
+ struct intel_ring_buffer *ring;
int i, j, page, offset, elt;
- spin_lock_irqsave(&dev_priv->error_lock, flags);
- if (!dev_priv->first_error) {
+ if (!error) {
seq_printf(m, "no error state collected\n");
- goto out;
+ return 0;
}
- error = dev_priv->first_error;
-
seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
error->time.tv_usec);
seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
seq_printf(m, "EIR: 0x%08x\n", error->eir);
+ seq_printf(m, "IER: 0x%08x\n", error->ier);
seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
for (i = 0; i < dev_priv->num_fence_regs; i++)
@@ -762,11 +722,8 @@ static int i915_error_state(struct seq_file *m, void *unused)
seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
}
- i915_ring_error_state(m, dev, error, RCS);
- if (HAS_BLT(dev))
- i915_ring_error_state(m, dev, error, BCS);
- if (HAS_BSD(dev))
- i915_ring_error_state(m, dev, error, VCS);
+ for_each_ring(ring, dev_priv, i)
+ i915_ring_error_state(m, dev, error, i);
if (error->active_bo)
print_error_buffers(m, "Active",
@@ -828,12 +785,71 @@ static int i915_error_state(struct seq_file *m, void *unused)
if (error->display)
intel_display_print_error_state(m, dev, error->display);
-out:
+ return 0;
+}
+
+static ssize_t
+i915_error_state_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ struct seq_file *m = filp->private_data;
+ struct i915_error_state_file_priv *error_priv = m->private;
+ struct drm_device *dev = error_priv->dev;
+
+ DRM_DEBUG_DRIVER("Resetting error state\n");
+
+ mutex_lock(&dev->struct_mutex);
+ i915_destroy_error_state(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ return cnt;
+}
+
+static int i915_error_state_open(struct inode *inode, struct file *file)
+{
+ struct drm_device *dev = inode->i_private;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct i915_error_state_file_priv *error_priv;
+ unsigned long flags;
+
+ error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
+ if (!error_priv)
+ return -ENOMEM;
+
+ error_priv->dev = dev;
+
+ spin_lock_irqsave(&dev_priv->error_lock, flags);
+ error_priv->error = dev_priv->first_error;
+ if (error_priv->error)
+ kref_get(&error_priv->error->ref);
spin_unlock_irqrestore(&dev_priv->error_lock, flags);
- return 0;
+ return single_open(file, i915_error_state, error_priv);
+}
+
+static int i915_error_state_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *m = file->private_data;
+ struct i915_error_state_file_priv *error_priv = m->private;
+
+ if (error_priv->error)
+ kref_put(&error_priv->error->ref, i915_error_state_free);
+ kfree(error_priv);
+
+ return single_release(inode, file);
}
+static const struct file_operations i915_error_state_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_error_state_open,
+ .read = seq_read,
+ .write = i915_error_state_write,
+ .llseek = default_llseek,
+ .release = i915_error_state_release,
+};
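
From userspace, the error-state node registered above behaves like any other debugfs file: reading streams the captured state through seq_read, and any write destroys it via i915_error_state_write. A minimal userspace sketch, assuming debugfs is mounted at /sys/kernel/debug and the device is DRM minor 0 (both assumptions, not taken from this patch):

/* Hypothetical sketch: dump the captured GPU error state, then clear it
 * through the debugfs interface added above. The path is an assumption
 * (debugfs at /sys/kernel/debug, DRM minor 0).
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/kernel/debug/dri/0/i915_error_state", O_RDWR);

	if (fd < 0) {
		perror("open i915_error_state");
		return 1;
	}

	while ((n = read(fd, buf, sizeof(buf))) > 0)	/* seq_read path */
		fwrite(buf, 1, n, stdout);

	write(fd, "1", 1);	/* any write resets the stored error state */
	close(fd);
	return 0;
}

Because the open handler takes a kref on the error state under the spinlock, the dump stays coherent even if a new hang is captured while the file is held open.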
+
static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1132,6 +1148,17 @@ static int gen6_drpc_info(struct seq_file *m)
seq_printf(m, "Core Power Down: %s\n",
yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
+
+ /* Not exactly sure what this is */
+ seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
+ I915_READ(GEN6_GT_GFX_RC6_LOCKED));
+ seq_printf(m, "RC6 residency since boot: %u\n",
+ I915_READ(GEN6_GT_GFX_RC6));
+ seq_printf(m, "RC6+ residency since boot: %u\n",
+ I915_READ(GEN6_GT_GFX_RC6p));
+ seq_printf(m, "RC6++ residency since boot: %u\n",
+ I915_READ(GEN6_GT_GFX_RC6pp));
+
return 0;
}
@@ -1306,17 +1333,25 @@ static int i915_opregion(struct seq_file *m, void *unused)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
+ void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
int ret;
+ if (data == NULL)
+ return -ENOMEM;
+
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
- return ret;
+ goto out;
- if (opregion->header)
- seq_write(m, opregion->header, OPREGION_SIZE);
+ if (opregion->header) {
+ memcpy_fromio(data, opregion->header, OPREGION_SIZE);
+ seq_write(m, data, OPREGION_SIZE);
+ }
mutex_unlock(&dev->struct_mutex);
+out:
+ kfree(data);
return 0;
}
@@ -1505,6 +1540,53 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
return 0;
}
+static int i915_dpio_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (!IS_VALLEYVIEW(dev)) {
+ seq_printf(m, "unsupported\n");
+ return 0;
+ }
+
+ ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+ if (ret)
+ return ret;
+
+ seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
+
+ seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
+ intel_dpio_read(dev_priv, _DPIO_DIV_A));
+ seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
+ intel_dpio_read(dev_priv, _DPIO_DIV_B));
+
+ seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
+ intel_dpio_read(dev_priv, _DPIO_REFSFR_A));
+ seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
+ intel_dpio_read(dev_priv, _DPIO_REFSFR_B));
+
+ seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
+ intel_dpio_read(dev_priv, _DPIO_CORE_CLK_A));
+ seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
+ intel_dpio_read(dev_priv, _DPIO_CORE_CLK_B));
+
+ seq_printf(m, "DPIO_LFP_COEFF_A: 0x%08x\n",
+ intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_A));
+ seq_printf(m, "DPIO_LFP_COEFF_B: 0x%08x\n",
+ intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_B));
+
+ seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
+ intel_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return 0;
+}
+
static ssize_t
i915_wedged_read(struct file *filp,
char __user *ubuf,
@@ -1562,6 +1644,65 @@ static const struct file_operations i915_wedged_fops = {
};
static ssize_t
+i915_ring_stop_read(struct file *filp,
+ char __user *ubuf,
+ size_t max,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ char buf[20];
+ int len;
+
+ len = snprintf(buf, sizeof(buf),
+ "0x%08x\n", dev_priv->stop_rings);
+
+ if (len > sizeof(buf))
+ len = sizeof(buf);
+
+ return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+}
+
+static ssize_t
+i915_ring_stop_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ char buf[20];
+ int val = 0;
+
+ if (cnt > 0) {
+ if (cnt > sizeof(buf) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(buf, ubuf, cnt))
+ return -EFAULT;
+ buf[cnt] = 0;
+
+ val = simple_strtoul(buf, NULL, 0);
+ }
+
+ DRM_DEBUG_DRIVER("Stopping rings 0x%08x\n", val);
+
+ mutex_lock(&dev->struct_mutex);
+ dev_priv->stop_rings = val;
+ mutex_unlock(&dev->struct_mutex);
+
+ return cnt;
+}
+
+static const struct file_operations i915_ring_stop_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = i915_ring_stop_read,
+ .write = i915_ring_stop_write,
+ .llseek = default_llseek,
+};
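
As with the error-state node, i915_ring_stop is plain debugfs: a read returns the current mask, a write parses a new one with simple_strtoul. A small sketch of driving it from userspace follows, under the same assumed debugfs layout; the mapping of mask bits to rings (e.g. bit 0 for the render ring) is an assumption here, not something this hunk spells out:

/* Hypothetical sketch: read and update dev_priv->stop_rings through the
 * i915_ring_stop debugfs file added above. The path and the bit-0 =
 * render-ring mapping are assumptions.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char cur[20] = "";
	int fd = open("/sys/kernel/debug/dri/0/i915_ring_stop", O_RDWR);

	if (fd < 0) {
		perror("open i915_ring_stop");
		return 1;
	}

	read(fd, cur, sizeof(cur) - 1);		/* e.g. "0x00000000\n" */
	printf("stop_rings was: %s", cur);

	write(fd, "0x1", 3);	/* stop one ring to exercise hang handling */
	close(fd);
	return 0;
}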
+
+static ssize_t
i915_max_freq_read(struct file *filp,
char __user *ubuf,
size_t max,
@@ -1738,7 +1879,7 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
return 0;
}
-int i915_forcewake_release(struct inode *inode, struct file *file)
+static int i915_forcewake_release(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1803,11 +1944,10 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_capabilities", i915_capabilities, 0},
{"i915_gem_objects", i915_gem_object_info, 0},
{"i915_gem_gtt", i915_gem_gtt_info, 0},
+ {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
- {"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},
- {"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST},
{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
{"i915_gem_request", i915_gem_request_info, 0},
{"i915_gem_seqno", i915_gem_seqno_info, 0},
@@ -1816,13 +1956,6 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
- {"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS},
- {"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS},
- {"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS},
- {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
- {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
- {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
- {"i915_error_state", i915_error_state, 0},
{"i915_rstdby_delays", i915_rstdby_delays, 0},
{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
{"i915_delayfreq_table", i915_delayfreq_table, 0},
@@ -1839,6 +1972,7 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
{"i915_swizzle_info", i915_swizzle_info, 0},
{"i915_ppgtt_info", i915_ppgtt_info, 0},
+ {"i915_dpio", i915_dpio_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
@@ -1867,6 +2001,17 @@ int i915_debugfs_init(struct drm_minor *minor)
&i915_cache_sharing_fops);
if (ret)
return ret;
+ ret = i915_debugfs_create(minor->debugfs_root, minor,
+ "i915_ring_stop",
+ &i915_ring_stop_fops);
+ if (ret)
+ return ret;
+
+ ret = i915_debugfs_create(minor->debugfs_root, minor,
+ "i915_error_state",
+ &i915_error_state_fops);
+ if (ret)
+ return ret;
return drm_debugfs_create_files(i915_debugfs_list,
I915_DEBUGFS_ENTRIES,
@@ -1885,6 +2030,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
1, minor);
drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
1, minor);
+ drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
+ 1, minor);
}
#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index ba60f3c8f91..f94792626b9 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -26,6 +26,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "drmP.h"
#include "drm.h"
#include "drm_crtc_helper.h"
@@ -34,15 +36,62 @@
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
-#include "../../../platform/x86/intel_ips.h"
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
-#include <linux/module.h>
#include <acpi/video.h>
+#include <asm/pat.h>
+
+#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
+
+#define BEGIN_LP_RING(n) \
+ intel_ring_begin(LP_RING(dev_priv), (n))
+
+#define OUT_RING(x) \
+ intel_ring_emit(LP_RING(dev_priv), x)
+
+#define ADVANCE_LP_RING() \
+ intel_ring_advance(LP_RING(dev_priv))
+
+/**
+ * Lock test for when it's just for synchronization of ring access.
+ *
+ * In that case, we don't need to do it when GEM is initialized as nobody else
+ * has access to the ring.
+ */
+#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
+ if (LP_RING(dev->dev_private)->obj == NULL) \
+ LOCK_TEST_WITH_RETURN(dev, file); \
+} while (0)
+
+static inline u32
+intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
+{
+ if (I915_NEED_GFX_HWS(dev_priv->dev))
+ return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
+ else
+ return intel_read_status_page(LP_RING(dev_priv), reg);
+}
+
+#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
+#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
+#define I915_BREADCRUMB_INDEX 0x21
+
+void i915_update_dri1_breadcrumb(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_master_private *master_priv;
+
+ if (dev->primary->master) {
+ master_priv = dev->primary->master->driver_priv;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->last_dispatch =
+ READ_BREADCRUMB(dev_priv);
+ }
+}
static void i915_write_hws_pga(struct drm_device *dev)
{
@@ -97,7 +146,7 @@ static void i915_free_hws(struct drm_device *dev)
if (ring->status_page.gfx_addr) {
ring->status_page.gfx_addr = 0;
- drm_core_ioremapfree(&dev_priv->hws_map, dev);
+ iounmap(dev_priv->dri1.gfx_hws_cpu_addr);
}
/* Need to rewrite hardware status page */
@@ -195,7 +244,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
/* Allow hardware batchbuffers unless told otherwise.
*/
- dev_priv->allow_batchbuffer = 1;
+ dev_priv->dri1.allow_batchbuffer = 1;
return 0;
}
@@ -207,7 +256,7 @@ static int i915_dma_resume(struct drm_device * dev)
DRM_DEBUG_DRIVER("%s\n", __func__);
- if (ring->map.handle == NULL) {
+ if (ring->virtual_start == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return -ENOMEM;
@@ -236,6 +285,9 @@ static int i915_dma_init(struct drm_device *dev, void *data,
drm_i915_init_t *init = data;
int retcode = 0;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
switch (init->func) {
case I915_INIT_DMA:
retcode = i915_initialize(dev, init);
@@ -578,6 +630,9 @@ static int i915_flush_ioctl(struct drm_device *dev, void *data,
{
int ret;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
mutex_lock(&dev->struct_mutex);
@@ -598,7 +653,10 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
int ret;
struct drm_clip_rect *cliprects = NULL;
- if (!dev_priv->allow_batchbuffer) {
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ if (!dev_priv->dri1.allow_batchbuffer) {
DRM_ERROR("Batchbuffer ioctl disabled\n");
return -EINVAL;
}
@@ -655,6 +713,9 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
if (cmdbuf->num_cliprects < 0)
@@ -706,11 +767,166 @@ fail_batch_free:
return ret;
}
+static int i915_emit_irq(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+
+ i915_kernel_lost_context(dev);
+
+ DRM_DEBUG_DRIVER("\n");
+
+ dev_priv->counter++;
+ if (dev_priv->counter > 0x7FFFFFFFUL)
+ dev_priv->counter = 1;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->last_enqueue = dev_priv->counter;
+
+ if (BEGIN_LP_RING(4) == 0) {
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(dev_priv->counter);
+ OUT_RING(MI_USER_INTERRUPT);
+ ADVANCE_LP_RING();
+ }
+
+ return dev_priv->counter;
+}
+
+static int i915_wait_irq(struct drm_device * dev, int irq_nr)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+ int ret = 0;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
+ DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
+ READ_BREADCRUMB(dev_priv));
+
+ if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+ return 0;
+ }
+
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+
+ if (ring->irq_get(ring)) {
+ DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
+ READ_BREADCRUMB(dev_priv) >= irq_nr);
+ ring->irq_put(ring);
+ } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
+ ret = -EBUSY;
+
+ if (ret == -EBUSY) {
+ DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
+ READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
+ }
+
+ return ret;
+}
+
+/* Needs the lock as it touches the ring.
+ */
+static int i915_irq_emit(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_irq_emit_t *emit = data;
+ int result;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ mutex_lock(&dev->struct_mutex);
+ result = i915_emit_irq(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
+ DRM_ERROR("copy_to_user\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/* Doesn't need the hardware lock.
+ */
+static int i915_irq_wait(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_irq_wait_t *irqwait = data;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ return i915_wait_irq(dev, irqwait->irq_seq);
+}
+
+static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_vblank_pipe_t *pipe = data;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
+
+ return 0;
+}
+
+/**
+ * Schedule buffer swap at given vertical blank.
+ */
+static int i915_vblank_swap(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ /* The delayed swap mechanism was fundamentally racy, and has been
+ * removed. The model was that the client requested a delayed flip/swap
+ * from the kernel, then waited for vblank before continuing to perform
+ * rendering. The problem was that the kernel might wake the client
+ * up before it dispatched the vblank swap (since the lock has to be
+ * held while touching the ringbuffer), in which case the client would
+ * clear and start the next frame before the swap occurred, and
+ * flicker would occur in addition to likely missing the vblank.
+ *
+ * In the absence of this ioctl, userland falls back to a correct path
+ * of waiting for a vblank, then dispatching the swap on its own.
+ * Context switching to userland and back is plenty fast enough for
+ * meeting the requirements of vblank swapping.
+ */
+ return -EINVAL;
+}
+
static int i915_flip_bufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
int ret;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
DRM_DEBUG_DRIVER("%s\n", __func__);
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
@@ -739,7 +955,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = dev->pdev->irq ? 1 : 0;
break;
case I915_PARAM_ALLOW_BATCHBUFFER:
- value = dev_priv->allow_batchbuffer ? 1 : 0;
+ value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
break;
case I915_PARAM_LAST_DISPATCH:
value = READ_BREADCRUMB(dev_priv);
@@ -748,7 +964,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = dev->pci_device;
break;
case I915_PARAM_HAS_GEM:
- value = dev_priv->has_gem;
+ value = 1;
break;
case I915_PARAM_NUM_FENCES_AVAIL:
value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
@@ -761,13 +977,13 @@ static int i915_getparam(struct drm_device *dev, void *data,
break;
case I915_PARAM_HAS_EXECBUF2:
/* depends on GEM */
- value = dev_priv->has_gem;
+ value = 1;
break;
case I915_PARAM_HAS_BSD:
- value = HAS_BSD(dev);
+ value = intel_ring_initialized(&dev_priv->ring[VCS]);
break;
case I915_PARAM_HAS_BLT:
- value = HAS_BLT(dev);
+ value = intel_ring_initialized(&dev_priv->ring[BCS]);
break;
case I915_PARAM_HAS_RELAXED_FENCING:
value = 1;
@@ -787,6 +1003,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_LLC:
value = HAS_LLC(dev);
break;
+ case I915_PARAM_HAS_ALIASING_PPGTT:
+ value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -816,10 +1035,9 @@ static int i915_setparam(struct drm_device *dev, void *data,
case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
break;
case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
- dev_priv->tex_lru_log_granularity = param->value;
break;
case I915_SETPARAM_ALLOW_BATCHBUFFER:
- dev_priv->allow_batchbuffer = param->value;
+ dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
break;
case I915_SETPARAM_NUM_USED_FENCES:
if (param->value > dev_priv->num_fence_regs ||
@@ -844,6 +1062,9 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
drm_i915_hws_addr_t *hws = data;
struct intel_ring_buffer *ring = LP_RING(dev_priv);
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
if (!I915_NEED_GFX_HWS(dev))
return -EINVAL;
@@ -861,23 +1082,17 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
- dev_priv->hws_map.offset = dev->agp->base + hws->addr;
- dev_priv->hws_map.size = 4*1024;
- dev_priv->hws_map.type = 0;
- dev_priv->hws_map.flags = 0;
- dev_priv->hws_map.mtrr = 0;
-
- drm_core_ioremap_wc(&dev_priv->hws_map, dev);
- if (dev_priv->hws_map.handle == NULL) {
+ dev_priv->dri1.gfx_hws_cpu_addr = ioremap_wc(dev->agp->base + hws->addr,
+ 4096);
+ if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
i915_dma_cleanup(dev);
ring->status_page.gfx_addr = 0;
DRM_ERROR("can not ioremap virtual address for"
" G33 hw status page\n");
return -ENOMEM;
}
- ring->status_page.page_addr =
- (void __force __iomem *)dev_priv->hws_map.handle;
- memset_io(ring->status_page.page_addr, 0, PAGE_SIZE);
+
+ memset_io(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
@@ -1013,133 +1228,6 @@ intel_teardown_mchbar(struct drm_device *dev)
release_resource(&dev_priv->mch_res);
}
-#define PTE_ADDRESS_MASK 0xfffff000
-#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
-#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
-#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
-#define PTE_MAPPING_TYPE_CACHED (3 << 1)
-#define PTE_MAPPING_TYPE_MASK (3 << 1)
-#define PTE_VALID (1 << 0)
-
-/**
- * i915_stolen_to_phys - take an offset into stolen memory and turn it into
- * a physical one
- * @dev: drm device
- * @offset: address to translate
- *
- * Some chip functions require allocations from stolen space and need the
- * physical address of the memory in question.
- */
-static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct pci_dev *pdev = dev_priv->bridge_dev;
- u32 base;
-
-#if 0
- /* On the machines I have tested the Graphics Base of Stolen Memory
- * is unreliable, so compute the base by subtracting the stolen memory
- * from the Top of Low Usable DRAM which is where the BIOS places
- * the graphics stolen memory.
- */
- if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
- /* top 32bits are reserved = 0 */
- pci_read_config_dword(pdev, 0xA4, &base);
- } else {
- /* XXX presume 8xx is the same as i915 */
- pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
- }
-#else
- if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
- u16 val;
- pci_read_config_word(pdev, 0xb0, &val);
- base = val >> 4 << 20;
- } else {
- u8 val;
- pci_read_config_byte(pdev, 0x9c, &val);
- base = val >> 3 << 27;
- }
- base -= dev_priv->mm.gtt->stolen_size;
-#endif
-
- return base + offset;
-}
-
-static void i915_warn_stolen(struct drm_device *dev)
-{
- DRM_ERROR("not enough stolen space for compressed buffer, disabling\n");
- DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
-}
-
-static void i915_setup_compression(struct drm_device *dev, int size)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
- unsigned long cfb_base;
- unsigned long ll_base = 0;
-
- /* Just in case the BIOS is doing something questionable. */
- intel_disable_fbc(dev);
-
- compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
- if (compressed_fb)
- compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
- if (!compressed_fb)
- goto err;
-
- cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
- if (!cfb_base)
- goto err_fb;
-
- if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
- compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
- 4096, 4096, 0);
- if (compressed_llb)
- compressed_llb = drm_mm_get_block(compressed_llb,
- 4096, 4096);
- if (!compressed_llb)
- goto err_fb;
-
- ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
- if (!ll_base)
- goto err_llb;
- }
-
- dev_priv->cfb_size = size;
-
- dev_priv->compressed_fb = compressed_fb;
- if (HAS_PCH_SPLIT(dev))
- I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
- else if (IS_GM45(dev)) {
- I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
- } else {
- I915_WRITE(FBC_CFB_BASE, cfb_base);
- I915_WRITE(FBC_LL_BASE, ll_base);
- dev_priv->compressed_llb = compressed_llb;
- }
-
- DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
- cfb_base, ll_base, size >> 20);
- return;
-
-err_llb:
- drm_mm_put_block(compressed_llb);
-err_fb:
- drm_mm_put_block(compressed_fb);
-err:
- dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
- i915_warn_stolen(dev);
-}
-
-static void i915_cleanup_compression(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- drm_mm_put_block(dev_priv->compressed_fb);
- if (dev_priv->compressed_llb)
- drm_mm_put_block(dev_priv->compressed_llb);
-}
-
/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
@@ -1158,14 +1246,14 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
struct drm_device *dev = pci_get_drvdata(pdev);
pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
if (state == VGA_SWITCHEROO_ON) {
- printk(KERN_INFO "i915: switched on\n");
+ pr_info("switched on\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
/* i915 resume handler doesn't set to D0 */
pci_set_power_state(dev->pdev, PCI_D0);
i915_resume(dev);
dev->switch_power_state = DRM_SWITCH_POWER_ON;
} else {
- printk(KERN_ERR "i915: switched off\n");
+ pr_err("switched off\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
i915_suspend(dev, pmm);
dev->switch_power_state = DRM_SWITCH_POWER_OFF;
@@ -1183,88 +1271,11 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
return can_switch;
}
-static bool
-intel_enable_ppgtt(struct drm_device *dev)
-{
- if (i915_enable_ppgtt >= 0)
- return i915_enable_ppgtt;
-
-#ifdef CONFIG_INTEL_IOMMU
- /* Disable ppgtt on SNB if VT-d is on. */
- if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
- return false;
-#endif
-
- return true;
-}
-
-static int i915_load_gem_init(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long prealloc_size, gtt_size, mappable_size;
- int ret;
-
- prealloc_size = dev_priv->mm.gtt->stolen_size;
- gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
- mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
-
- /* Basic memrange allocator for stolen space */
- drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
-
- mutex_lock(&dev->struct_mutex);
- if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
- /* PPGTT pdes are stolen from global gtt ptes, so shrink the
- * aperture accordingly when using aliasing ppgtt. */
- gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
- /* For paranoia keep the guard page in between. */
- gtt_size -= PAGE_SIZE;
-
- i915_gem_do_init(dev, 0, mappable_size, gtt_size);
-
- ret = i915_gem_init_aliasing_ppgtt(dev);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
- } else {
- /* Let GEM Manage all of the aperture.
- *
- * However, leave one page at the end still bound to the scratch
- * page. There are a number of places where the hardware
- * apparently prefetches past the end of the object, and we've
- * seen multiple hangs with the GPU head pointer stuck in a
- * batchbuffer bound at the last page of the aperture. One page
- * should be enough to keep any prefetching inside of the
- * aperture.
- */
- i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
- }
-
- ret = i915_gem_init_hw(dev);
- mutex_unlock(&dev->struct_mutex);
- if (ret) {
- i915_gem_cleanup_aliasing_ppgtt(dev);
- return ret;
- }
-
- /* Try to set up FBC with a reasonable compressed buffer size */
- if (I915_HAS_FBC(dev) && i915_powersave) {
- int cfb_size;
-
- /* Leave 1M for line length buffer & misc. */
-
- /* Try to get a 32M buffer... */
- if (prealloc_size > (36*1024*1024))
- cfb_size = 32*1024*1024;
- else /* fall back to 7/8 of the stolen space */
- cfb_size = prealloc_size * 7 / 8;
- i915_setup_compression(dev, cfb_size);
- }
-
- /* Allow hardware batchbuffers unless told otherwise. */
- dev_priv->allow_batchbuffer = 1;
- return 0;
-}
+static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
+ .set_gpu_state = i915_switcheroo_set_state,
+ .reprobe = NULL,
+ .can_switch = i915_switcheroo_can_switch,
+};
static int i915_load_modeset_init(struct drm_device *dev)
{
@@ -1288,22 +1299,22 @@ static int i915_load_modeset_init(struct drm_device *dev)
intel_register_dsm_handler();
- ret = vga_switcheroo_register_client(dev->pdev,
- i915_switcheroo_set_state,
- NULL,
- i915_switcheroo_can_switch);
+ ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops);
if (ret)
goto cleanup_vga_client;
- /* IIR "flip pending" bit means done if this bit is set */
- if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE))
- dev_priv->flip_pending_is_done = true;
+ /* Initialise stolen first so that we may reserve preallocated
+ * objects for the BIOS to KMS transition.
+ */
+ ret = i915_gem_init_stolen(dev);
+ if (ret)
+ goto cleanup_vga_switcheroo;
intel_modeset_init(dev);
- ret = i915_load_gem_init(dev);
+ ret = i915_gem_init(dev);
if (ret)
- goto cleanup_vga_switcheroo;
+ goto cleanup_gem_stolen;
intel_modeset_gem_init(dev);
@@ -1333,6 +1344,8 @@ cleanup_gem:
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_aliasing_ppgtt(dev);
+cleanup_gem_stolen:
+ i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
@@ -1365,572 +1378,26 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
master->driver_priv = NULL;
}
-static void i915_pineview_get_mem_freq(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- u32 tmp;
-
- tmp = I915_READ(CLKCFG);
-
- switch (tmp & CLKCFG_FSB_MASK) {
- case CLKCFG_FSB_533:
- dev_priv->fsb_freq = 533; /* 133*4 */
- break;
- case CLKCFG_FSB_800:
- dev_priv->fsb_freq = 800; /* 200*4 */
- break;
- case CLKCFG_FSB_667:
- dev_priv->fsb_freq = 667; /* 167*4 */
- break;
- case CLKCFG_FSB_400:
- dev_priv->fsb_freq = 400; /* 100*4 */
- break;
- }
-
- switch (tmp & CLKCFG_MEM_MASK) {
- case CLKCFG_MEM_533:
- dev_priv->mem_freq = 533;
- break;
- case CLKCFG_MEM_667:
- dev_priv->mem_freq = 667;
- break;
- case CLKCFG_MEM_800:
- dev_priv->mem_freq = 800;
- break;
- }
-
- /* detect pineview DDR3 setting */
- tmp = I915_READ(CSHRDDR3CTL);
- dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
-}
-
-static void i915_ironlake_get_mem_freq(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- u16 ddrpll, csipll;
-
- ddrpll = I915_READ16(DDRMPLL1);
- csipll = I915_READ16(CSIPLL0);
-
- switch (ddrpll & 0xff) {
- case 0xc:
- dev_priv->mem_freq = 800;
- break;
- case 0x10:
- dev_priv->mem_freq = 1066;
- break;
- case 0x14:
- dev_priv->mem_freq = 1333;
- break;
- case 0x18:
- dev_priv->mem_freq = 1600;
- break;
- default:
- DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
- ddrpll & 0xff);
- dev_priv->mem_freq = 0;
- break;
- }
-
- dev_priv->r_t = dev_priv->mem_freq;
-
- switch (csipll & 0x3ff) {
- case 0x00c:
- dev_priv->fsb_freq = 3200;
- break;
- case 0x00e:
- dev_priv->fsb_freq = 3733;
- break;
- case 0x010:
- dev_priv->fsb_freq = 4266;
- break;
- case 0x012:
- dev_priv->fsb_freq = 4800;
- break;
- case 0x014:
- dev_priv->fsb_freq = 5333;
- break;
- case 0x016:
- dev_priv->fsb_freq = 5866;
- break;
- case 0x018:
- dev_priv->fsb_freq = 6400;
- break;
- default:
- DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
- csipll & 0x3ff);
- dev_priv->fsb_freq = 0;
- break;
- }
-
- if (dev_priv->fsb_freq == 3200) {
- dev_priv->c_m = 0;
- } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
- dev_priv->c_m = 1;
- } else {
- dev_priv->c_m = 2;
- }
-}
-
-static const struct cparams {
- u16 i;
- u16 t;
- u16 m;
- u16 c;
-} cparams[] = {
- { 1, 1333, 301, 28664 },
- { 1, 1066, 294, 24460 },
- { 1, 800, 294, 25192 },
- { 0, 1333, 276, 27605 },
- { 0, 1066, 276, 27605 },
- { 0, 800, 231, 23784 },
-};
-
-unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
-{
- u64 total_count, diff, ret;
- u32 count1, count2, count3, m = 0, c = 0;
- unsigned long now = jiffies_to_msecs(jiffies), diff1;
- int i;
-
- diff1 = now - dev_priv->last_time1;
-
- /* Prevent division-by-zero if we are asking too fast.
- * Also, we don't get interesting results if we are polling
- * faster than once in 10ms, so just return the saved value
- * in such cases.
- */
- if (diff1 <= 10)
- return dev_priv->chipset_power;
-
- count1 = I915_READ(DMIEC);
- count2 = I915_READ(DDREC);
- count3 = I915_READ(CSIEC);
-
- total_count = count1 + count2 + count3;
-
- /* FIXME: handle per-counter overflow */
- if (total_count < dev_priv->last_count1) {
- diff = ~0UL - dev_priv->last_count1;
- diff += total_count;
- } else {
- diff = total_count - dev_priv->last_count1;
- }
-
- for (i = 0; i < ARRAY_SIZE(cparams); i++) {
- if (cparams[i].i == dev_priv->c_m &&
- cparams[i].t == dev_priv->r_t) {
- m = cparams[i].m;
- c = cparams[i].c;
- break;
- }
- }
-
- diff = div_u64(diff, diff1);
- ret = ((m * diff) + c);
- ret = div_u64(ret, 10);
-
- dev_priv->last_count1 = total_count;
- dev_priv->last_time1 = now;
-
- dev_priv->chipset_power = ret;
-
- return ret;
-}
-
-unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
-{
- unsigned long m, x, b;
- u32 tsfs;
-
- tsfs = I915_READ(TSFS);
-
- m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
- x = I915_READ8(TR1);
-
- b = tsfs & TSFS_INTR_MASK;
-
- return ((m * x) / 127) - b;
-}
-
-static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
-{
- static const struct v_table {
- u16 vd; /* in .1 mil */
- u16 vm; /* in .1 mil */
- } v_table[] = {
- { 0, 0, },
- { 375, 0, },
- { 500, 0, },
- { 625, 0, },
- { 750, 0, },
- { 875, 0, },
- { 1000, 0, },
- { 1125, 0, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4250, 3125, },
- { 4375, 3250, },
- { 4500, 3375, },
- { 4625, 3500, },
- { 4750, 3625, },
- { 4875, 3750, },
- { 5000, 3875, },
- { 5125, 4000, },
- { 5250, 4125, },
- { 5375, 4250, },
- { 5500, 4375, },
- { 5625, 4500, },
- { 5750, 4625, },
- { 5875, 4750, },
- { 6000, 4875, },
- { 6125, 5000, },
- { 6250, 5125, },
- { 6375, 5250, },
- { 6500, 5375, },
- { 6625, 5500, },
- { 6750, 5625, },
- { 6875, 5750, },
- { 7000, 5875, },
- { 7125, 6000, },
- { 7250, 6125, },
- { 7375, 6250, },
- { 7500, 6375, },
- { 7625, 6500, },
- { 7750, 6625, },
- { 7875, 6750, },
- { 8000, 6875, },
- { 8125, 7000, },
- { 8250, 7125, },
- { 8375, 7250, },
- { 8500, 7375, },
- { 8625, 7500, },
- { 8750, 7625, },
- { 8875, 7750, },
- { 9000, 7875, },
- { 9125, 8000, },
- { 9250, 8125, },
- { 9375, 8250, },
- { 9500, 8375, },
- { 9625, 8500, },
- { 9750, 8625, },
- { 9875, 8750, },
- { 10000, 8875, },
- { 10125, 9000, },
- { 10250, 9125, },
- { 10375, 9250, },
- { 10500, 9375, },
- { 10625, 9500, },
- { 10750, 9625, },
- { 10875, 9750, },
- { 11000, 9875, },
- { 11125, 10000, },
- { 11250, 10125, },
- { 11375, 10250, },
- { 11500, 10375, },
- { 11625, 10500, },
- { 11750, 10625, },
- { 11875, 10750, },
- { 12000, 10875, },
- { 12125, 11000, },
- { 12250, 11125, },
- { 12375, 11250, },
- { 12500, 11375, },
- { 12625, 11500, },
- { 12750, 11625, },
- { 12875, 11750, },
- { 13000, 11875, },
- { 13125, 12000, },
- { 13250, 12125, },
- { 13375, 12250, },
- { 13500, 12375, },
- { 13625, 12500, },
- { 13750, 12625, },
- { 13875, 12750, },
- { 14000, 12875, },
- { 14125, 13000, },
- { 14250, 13125, },
- { 14375, 13250, },
- { 14500, 13375, },
- { 14625, 13500, },
- { 14750, 13625, },
- { 14875, 13750, },
- { 15000, 13875, },
- { 15125, 14000, },
- { 15250, 14125, },
- { 15375, 14250, },
- { 15500, 14375, },
- { 15625, 14500, },
- { 15750, 14625, },
- { 15875, 14750, },
- { 16000, 14875, },
- { 16125, 15000, },
- };
- if (dev_priv->info->is_mobile)
- return v_table[pxvid].vm;
- else
- return v_table[pxvid].vd;
-}
-
-void i915_update_gfx_val(struct drm_i915_private *dev_priv)
+static void
+i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base,
+ unsigned long size)
{
- struct timespec now, diff1;
- u64 diff;
- unsigned long diffms;
- u32 count;
-
- if (dev_priv->info->gen != 5)
- return;
-
- getrawmonotonic(&now);
- diff1 = timespec_sub(now, dev_priv->last_time2);
+ dev_priv->mm.gtt_mtrr = -1;
- /* Don't divide by 0 */
- diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
- if (!diffms)
+#if defined(CONFIG_X86_PAT)
+ if (cpu_has_pat)
return;
+#endif
- count = I915_READ(GFXEC);
-
- if (count < dev_priv->last_count2) {
- diff = ~0UL - dev_priv->last_count2;
- diff += count;
- } else {
- diff = count - dev_priv->last_count2;
- }
-
- dev_priv->last_count2 = count;
- dev_priv->last_time2 = now;
-
- /* More magic constants... */
- diff = diff * 1181;
- diff = div_u64(diff, diffms * 10);
- dev_priv->gfx_power = diff;
-}
-
-unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
-{
- unsigned long t, corr, state1, corr2, state2;
- u32 pxvid, ext_v;
-
- pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
- pxvid = (pxvid >> 24) & 0x7f;
- ext_v = pvid_to_extvid(dev_priv, pxvid);
-
- state1 = ext_v;
-
- t = i915_mch_val(dev_priv);
-
- /* Revel in the empirically derived constants */
-
- /* Correction factor in 1/100000 units */
- if (t > 80)
- corr = ((t * 2349) + 135940);
- else if (t >= 50)
- corr = ((t * 964) + 29317);
- else /* < 50 */
- corr = ((t * 301) + 1004);
-
- corr = corr * ((150142 * state1) / 10000 - 78642);
- corr /= 100000;
- corr2 = (corr * dev_priv->corr);
-
- state2 = (corr2 * state1) / 10000;
- state2 /= 100; /* convert to mW */
-
- i915_update_gfx_val(dev_priv);
-
- return dev_priv->gfx_power + state2;
-}
-
-/* Global for IPS driver to get at the current i915 device */
-static struct drm_i915_private *i915_mch_dev;
-/*
- * Lock protecting IPS related data structures
- * - i915_mch_dev
- * - dev_priv->max_delay
- * - dev_priv->min_delay
- * - dev_priv->fmax
- * - dev_priv->gpu_busy
- */
-static DEFINE_SPINLOCK(mchdev_lock);
-
-/**
- * i915_read_mch_val - return value for IPS use
- *
- * Calculate and return a value for the IPS driver to use when deciding whether
- * we have thermal and power headroom to increase CPU or GPU power budget.
- */
-unsigned long i915_read_mch_val(void)
-{
- struct drm_i915_private *dev_priv;
- unsigned long chipset_val, graphics_val, ret = 0;
-
- spin_lock(&mchdev_lock);
- if (!i915_mch_dev)
- goto out_unlock;
- dev_priv = i915_mch_dev;
-
- chipset_val = i915_chipset_val(dev_priv);
- graphics_val = i915_gfx_val(dev_priv);
-
- ret = chipset_val + graphics_val;
-
-out_unlock:
- spin_unlock(&mchdev_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(i915_read_mch_val);
-
-/**
- * i915_gpu_raise - raise GPU frequency limit
- *
- * Raise the limit; IPS indicates we have thermal headroom.
- */
-bool i915_gpu_raise(void)
-{
- struct drm_i915_private *dev_priv;
- bool ret = true;
-
- spin_lock(&mchdev_lock);
- if (!i915_mch_dev) {
- ret = false;
- goto out_unlock;
- }
- dev_priv = i915_mch_dev;
-
- if (dev_priv->max_delay > dev_priv->fmax)
- dev_priv->max_delay--;
-
-out_unlock:
- spin_unlock(&mchdev_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_raise);
-
-/**
- * i915_gpu_lower - lower GPU frequency limit
- *
- * IPS indicates we're close to a thermal limit, so throttle back the GPU
- * frequency maximum.
- */
-bool i915_gpu_lower(void)
-{
- struct drm_i915_private *dev_priv;
- bool ret = true;
-
- spin_lock(&mchdev_lock);
- if (!i915_mch_dev) {
- ret = false;
- goto out_unlock;
- }
- dev_priv = i915_mch_dev;
-
- if (dev_priv->max_delay < dev_priv->min_delay)
- dev_priv->max_delay++;
-
-out_unlock:
- spin_unlock(&mchdev_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_lower);
-
-/**
- * i915_gpu_busy - indicate GPU business to IPS
- *
- * Tell the IPS driver whether or not the GPU is busy.
- */
-bool i915_gpu_busy(void)
-{
- struct drm_i915_private *dev_priv;
- bool ret = false;
-
- spin_lock(&mchdev_lock);
- if (!i915_mch_dev)
- goto out_unlock;
- dev_priv = i915_mch_dev;
-
- ret = dev_priv->busy;
-
-out_unlock:
- spin_unlock(&mchdev_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_busy);
-
-/**
- * i915_gpu_turbo_disable - disable graphics turbo
- *
- * Disable graphics turbo by resetting the max frequency and setting the
- * current frequency to the default.
- */
-bool i915_gpu_turbo_disable(void)
-{
- struct drm_i915_private *dev_priv;
- bool ret = true;
-
- spin_lock(&mchdev_lock);
- if (!i915_mch_dev) {
- ret = false;
- goto out_unlock;
- }
- dev_priv = i915_mch_dev;
-
- dev_priv->max_delay = dev_priv->fstart;
-
- if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
- ret = false;
-
-out_unlock:
- spin_unlock(&mchdev_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
-
-/**
- * Tells the intel_ips driver that the i915 driver is now loaded, if
- * IPS got loaded first.
- *
- * This awkward dance is so that neither module has to depend on the
- * other in order for IPS to do the appropriate communication of
- * GPU turbo limits to i915.
- */
-static void
-ips_ping_for_i915_load(void)
-{
- void (*link)(void);
-
- link = symbol_get(ips_link_to_i915_driver);
- if (link) {
- link();
- symbol_put(ips_link_to_i915_driver);
+ /* Set up a WC MTRR for non-PAT systems. This is more common than
+ * one would think, because the kernel disables PAT on first
+ * generation Core chips because WC PAT gets overridden by a UC
+ * MTRR if present. Even if a UC MTRR isn't present.
+ */
+ dev_priv->mm.gtt_mtrr = mtrr_add(base, size, MTRR_TYPE_WRCOMB, 1);
+ if (dev_priv->mm.gtt_mtrr < 0) {
+ DRM_INFO("MTRR allocation failed. Graphics "
+ "performance may suffer.\n");
}
}
@@ -1948,8 +1415,16 @@ ips_ping_for_i915_load(void)
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
struct drm_i915_private *dev_priv;
+ struct intel_device_info *info;
int ret = 0, mmio_bar;
- uint32_t agp_size;
+ uint32_t aperture_size;
+
+ info = (struct intel_device_info *) flags;
+
+ /* Refuse to load on gen6+ without kms enabled. */
+ if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
/* i915 has 4 more counters */
dev->counters += 4;
@@ -1964,7 +1439,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev->dev_private = (void *)dev_priv;
dev_priv->dev = dev;
- dev_priv->info = (struct intel_device_info *) flags;
+ dev_priv->info = info;
if (i915_get_bridge_dev(dev)) {
ret = -EIO;
@@ -2003,27 +1478,16 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_rmmap;
}
- agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+ aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
dev_priv->mm.gtt_mapping =
- io_mapping_create_wc(dev->agp->base, agp_size);
+ io_mapping_create_wc(dev->agp->base, aperture_size);
if (dev_priv->mm.gtt_mapping == NULL) {
ret = -EIO;
goto out_rmmap;
}
- /* Set up a WC MTRR for non-PAT systems. This is more common than
- * one would think, because the kernel disables PAT on first
- * generation Core chips because WC PAT gets overridden by a UC
- * MTRR if present. Even if a UC MTRR isn't present.
- */
- dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
- agp_size,
- MTRR_TYPE_WRCOMB, 1);
- if (dev_priv->mm.gtt_mtrr < 0) {
- DRM_INFO("MTRR allocation failed. Graphics "
- "performance may suffer.\n");
- }
+ i915_mtrr_setup(dev_priv, dev->agp->base, aperture_size);
/* The i915 workqueue is primarily used for batched retirement of
* requests (and thus managing bo) once the task has been completed
@@ -2047,9 +1511,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_mtrrfree;
}
- /* enable GEM by default */
- dev_priv->has_gem = 1;
-
intel_irq_init(dev);
/* Try to make sure MCHBAR is enabled before poking at it */
@@ -2069,11 +1530,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_gem_unload;
}
- if (IS_PINEVIEW(dev))
- i915_pineview_get_mem_freq(dev);
- else if (IS_GEN5(dev))
- i915_ironlake_get_mem_freq(dev);
-
/* On the 945G/GM, the chipset reports the MSI capability on the
* integrated graphics even though the support isn't actually there
* according to the published specs. It doesn't appear to function
@@ -2093,7 +1549,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->error_lock);
spin_lock_init(&dev_priv->rps_lock);
- if (IS_IVYBRIDGE(dev))
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
dev_priv->num_pipe = 3;
else if (IS_MOBILE(dev) || !IS_GEN2(dev))
dev_priv->num_pipe = 2;
@@ -2117,6 +1573,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
}
}
+ i915_setup_sysfs(dev);
+
/* Must be done after probing outputs */
intel_opregion_init(dev);
acpi_video_register();
@@ -2124,14 +1582,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
(unsigned long) dev);
- if (IS_GEN5(dev)) {
- spin_lock(&mchdev_lock);
- i915_mch_dev = dev_priv;
- dev_priv->mchdev_lock = &mchdev_lock;
- spin_unlock(&mchdev_lock);
-
- ips_ping_for_i915_load();
- }
+ if (IS_GEN5(dev))
+ intel_gpu_ips_init(dev_priv);
return 0;
@@ -2166,17 +1618,18 @@ int i915_driver_unload(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- spin_lock(&mchdev_lock);
- i915_mch_dev = NULL;
- spin_unlock(&mchdev_lock);
+ intel_gpu_ips_teardown();
+
+ i915_teardown_sysfs(dev);
if (dev_priv->mm.inactive_shrinker.shrink)
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
mutex_lock(&dev->struct_mutex);
- ret = i915_gpu_idle(dev, true);
+ ret = i915_gpu_idle(dev);
if (ret)
DRM_ERROR("failed to idle hardware: %d\n", ret);
+ i915_gem_retire_requests(dev);
mutex_unlock(&dev->struct_mutex);
/* Cancel the retire work handler, which should be idle now. */
@@ -2228,8 +1681,7 @@ int i915_driver_unload(struct drm_device *dev)
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_aliasing_ppgtt(dev);
- if (I915_HAS_FBC(dev) && i915_powersave)
- i915_cleanup_compression(dev);
+ i915_gem_cleanup_stolen(dev);
drm_mm_takedown(&dev_priv->mm.stolen);
intel_cleanup_overlay(dev);
@@ -2277,7 +1729,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file)
* mode setting case, we want to restore the kernel's initial mode (just
* in case the last client left us in a bad state).
*
- * Additionally, in the non-mode setting case, we'll tear down the AGP
+ * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
* up any GEM state.
*/
@@ -2322,7 +1774,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -2355,16 +1807,10 @@ struct drm_ioctl_desc i915_ioctls[] = {
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
-/**
- * Determine if the device really is AGP or not.
- *
- * All Intel graphics chipsets are treated as AGP, even if they are really
- * PCI-e.
- *
- * \param dev The device to be tested.
- *
- * \returns
- * A value of 1 is always retured to indictate every i9x5 is AGP.
+/*
+ * This is really ugly: because old userspace abused the linux agp interface to
+ * manage the gtt, we need to claim that all intel devices are agp; otherwise
+ * the drm core refuses to initialize the agp support code.
*/
int i915_driver_device_is_agp(struct drm_device * dev)
{
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ae8a64f9f84..238a5216583 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -84,6 +84,12 @@ MODULE_PARM_DESC(lvds_downclock,
"Use panel (LVDS/eDP) downclocking for power savings "
"(default: false)");
+int i915_lvds_channel_mode __read_mostly;
+module_param_named(lvds_channel_mode, i915_lvds_channel_mode, int, 0600);
+MODULE_PARM_DESC(lvds_channel_mode,
+ "Specify LVDS channel mode "
+ "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
+
int i915_panel_use_ssc __read_mostly = -1;
module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
MODULE_PARM_DESC(lvds_use_ssc,
@@ -93,8 +99,8 @@ MODULE_PARM_DESC(lvds_use_ssc,
int i915_vbt_sdvo_panel_type __read_mostly = -1;
module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
MODULE_PARM_DESC(vbt_sdvo_panel_type,
- "Override selection of SDVO panel mode in the VBT "
- "(default: auto)");
+ "Override/Ignore selection of SDVO panel mode in the VBT "
+ "(-2=ignore, -1=auto [default], index in VBT BIOS table)");
static bool i915_try_reset __read_mostly = true;
module_param_named(reset, i915_try_reset, bool, 0600);
@@ -209,6 +215,7 @@ static const struct intel_device_info intel_ironlake_d_info = {
.gen = 5,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_bsd_ring = 1,
+ .has_pch_split = 1,
};
static const struct intel_device_info intel_ironlake_m_info = {
@@ -216,6 +223,7 @@ static const struct intel_device_info intel_ironlake_m_info = {
.need_gfx_hws = 1, .has_hotplug = 1,
.has_fbc = 1,
.has_bsd_ring = 1,
+ .has_pch_split = 1,
};
static const struct intel_device_info intel_sandybridge_d_info = {
@@ -224,6 +232,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
+ .has_pch_split = 1,
};
static const struct intel_device_info intel_sandybridge_m_info = {
@@ -233,6 +242,7 @@ static const struct intel_device_info intel_sandybridge_m_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
+ .has_pch_split = 1,
};
static const struct intel_device_info intel_ivybridge_d_info = {
@@ -241,6 +251,7 @@ static const struct intel_device_info intel_ivybridge_d_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
+ .has_pch_split = 1,
};
static const struct intel_device_info intel_ivybridge_m_info = {
@@ -250,6 +261,43 @@ static const struct intel_device_info intel_ivybridge_m_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
+ .has_pch_split = 1,
+};
+
+static const struct intel_device_info intel_valleyview_m_info = {
+ .gen = 7, .is_mobile = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_fbc = 0,
+ .has_bsd_ring = 1,
+ .has_blt_ring = 1,
+ .is_valleyview = 1,
+};
+
+static const struct intel_device_info intel_valleyview_d_info = {
+ .gen = 7,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_fbc = 0,
+ .has_bsd_ring = 1,
+ .has_blt_ring = 1,
+ .is_valleyview = 1,
+};
+
+static const struct intel_device_info intel_haswell_d_info = {
+ .is_haswell = 1, .gen = 7,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_bsd_ring = 1,
+ .has_blt_ring = 1,
+ .has_llc = 1,
+ .has_pch_split = 1,
+};
+
+static const struct intel_device_info intel_haswell_m_info = {
+ .is_haswell = 1, .gen = 7, .is_mobile = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_bsd_ring = 1,
+ .has_blt_ring = 1,
+ .has_llc = 1,
+ .has_pch_split = 1,
};
static const struct pci_device_id pciidlist[] = { /* aka */
@@ -297,6 +345,13 @@ static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
+ INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
+ INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
+ INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
+ INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
+ INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
+ INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
+ INTEL_VGA_DEVICE(0x0c16, &intel_haswell_d_info), /* SDV */
{0, 0, 0}
};
@@ -308,6 +363,7 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
+#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
void intel_detect_pch(struct drm_device *dev)
{
@@ -328,20 +384,45 @@ void intel_detect_pch(struct drm_device *dev)
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_IBX;
+ dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_CPT;
+ dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found CougarPoint PCH\n");
} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
/* PantherPoint is CPT compatible */
dev_priv->pch_type = PCH_CPT;
+ dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found PatherPoint PCH\n");
+ } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_LPT;
+ dev_priv->num_pch_pll = 0;
+ DRM_DEBUG_KMS("Found LynxPoint PCH\n");
}
+ BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
}
pci_dev_put(pch);
}
}
+bool i915_semaphore_is_enabled(struct drm_device *dev)
+{
+	if (INTEL_INFO(dev)->gen < 6)
+		return false;
+
+	if (i915_semaphores >= 0)
+		return i915_semaphores;
+
+#ifdef CONFIG_INTEL_IOMMU
+	/* Disable semaphores on SNB when IO remapping is on */
+	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+		return false;
+#endif
+
+	return true;
+}
+
void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
int count;
@@ -366,7 +447,7 @@ void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
udelay(10);
- I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1);
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
POSTING_READ(FORCEWAKE_MT);
count = 0;
@@ -408,7 +489,7 @@ void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
- I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
/* The below doubles as a POSTING_READ */
gen6_gt_check_fifodbg(dev_priv);
}
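
The _MASKED_BIT_* helpers encode the write-mask convention these registers use: the upper 16 bits of a write select which of the lower 16 bits actually change. A sketch of the definitions, as in i915_reg.h:

#define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))	/* unmask the bit and set it */
#define _MASKED_BIT_DISABLE(a)	((a) << 16)		/* unmask the bit, leave it clear */

So the two writes above expand to (1 << 16) | 1 and (1 << 16) | 0, exactly the open-coded values they replace.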
@@ -446,6 +527,31 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
return ret;
}
+void vlv_force_wake_get(struct drm_i915_private *dev_priv)
+{
+ int count;
+
+ /* Already awake? */
+ if ((I915_READ(0x130094) & 0xa1) == 0xa1)
+ return;
+
+ I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff);
+ POSTING_READ(FORCEWAKE_VLV);
+
+ count = 0;
+ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0)
+ udelay(10);
+}
+
+void vlv_force_wake_put(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000);
+ /* FIXME: confirm VLV behavior with Punit folks */
+ POSTING_READ(FORCEWAKE_VLV);
+}
+
static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -525,15 +631,16 @@ static int i915_drm_thaw(struct drm_device *dev)
/* KMS EnterVT equivalent */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ if (HAS_PCH_SPLIT(dev))
+ ironlake_init_pch_refclk(dev);
+
mutex_lock(&dev->struct_mutex);
dev_priv->mm.suspended = 0;
error = i915_gem_init_hw(dev);
mutex_unlock(&dev->struct_mutex);
- if (HAS_PCH_SPLIT(dev))
- ironlake_init_pch_refclk(dev);
-
+ intel_modeset_init_hw(dev);
drm_mode_config_reset(dev);
drm_irq_install(dev);
@@ -541,9 +648,6 @@ static int i915_drm_thaw(struct drm_device *dev)
mutex_lock(&dev->mode_config.mutex);
drm_helper_resume_force_mode(dev);
mutex_unlock(&dev->mode_config.mutex);
-
- if (IS_IRONLAKE_M(dev))
- ironlake_enable_rc6(dev);
}
intel_opregion_init(dev);
@@ -576,7 +680,7 @@ int i915_resume(struct drm_device *dev)
return 0;
}
-static int i8xx_do_reset(struct drm_device *dev, u8 flags)
+static int i8xx_do_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -610,11 +714,12 @@ static int i965_reset_complete(struct drm_device *dev)
{
u8 gdrst;
pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
- return gdrst & 0x1;
+ return (gdrst & GRDOM_RESET_ENABLE) == 0;
}
-static int i965_do_reset(struct drm_device *dev, u8 flags)
+static int i965_do_reset(struct drm_device *dev)
{
+ int ret;
u8 gdrst;
/*
@@ -623,20 +728,43 @@ static int i965_do_reset(struct drm_device *dev, u8 flags)
* triggers the reset; when done, the hardware will clear it.
*/
pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
- pci_write_config_byte(dev->pdev, I965_GDRST, gdrst | flags | 0x1);
+ pci_write_config_byte(dev->pdev, I965_GDRST,
+ gdrst | GRDOM_RENDER |
+ GRDOM_RESET_ENABLE);
+ ret = wait_for(i965_reset_complete(dev), 500);
+ if (ret)
+ return ret;
+
+ /* We can't reset render&media without also resetting display ... */
+ pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
+ pci_write_config_byte(dev->pdev, I965_GDRST,
+ gdrst | GRDOM_MEDIA |
+ GRDOM_RESET_ENABLE);
return wait_for(i965_reset_complete(dev), 500);
}
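
For reference, the GRDOM_* names decode the GDRST config register roughly as follows; a sketch of the bit layout (the authoritative values live in i915_reg.h):

#define GRDOM_FULL		(0 << 2)	/* reset everything */
#define GRDOM_RENDER		(1 << 2)	/* render domain */
#define GRDOM_MEDIA		(3 << 2)	/* media domain */
#define GRDOM_RESET_ENABLE	(1 << 0)	/* self-clearing trigger bit */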
-static int ironlake_do_reset(struct drm_device *dev, u8 flags)
+static int ironlake_do_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
- I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | flags | 0x1);
+ u32 gdrst;
+ int ret;
+
+ gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+ I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
+ gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
+ ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
+ if (ret)
+ return ret;
+
+ /* We can't reset render&media without also resetting display ... */
+ gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+ I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
+ gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}
-static int gen6_do_reset(struct drm_device *dev, u8 flags)
+static int gen6_do_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
@@ -671,10 +799,44 @@ static int gen6_do_reset(struct drm_device *dev, u8 flags)
return ret;
}
+static int intel_gpu_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret = -ENODEV;
+
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6:
+ ret = gen6_do_reset(dev);
+ break;
+ case 5:
+ ret = ironlake_do_reset(dev);
+ break;
+ case 4:
+ ret = i965_do_reset(dev);
+ break;
+ case 2:
+ ret = i8xx_do_reset(dev);
+ break;
+ }
+
+ /* Also reset the gpu hangman. */
+ if (dev_priv->stop_rings) {
+ DRM_DEBUG("Simulated gpu hang, resetting stop_rings\n");
+ dev_priv->stop_rings = 0;
+ if (ret == -ENODEV) {
+ DRM_ERROR("Reset not implemented, but ignoring "
+ "error for simulated gpu hangs\n");
+ ret = 0;
+ }
+ }
+
+ return ret;
+}
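
The stop_rings bookkeeping is the hook for simulated hangs: a nonzero value marks a hang injected from userspace (presumably via a debugfs knob from the same series), and clearing it here, while tolerating -ENODEV, lets faked hangs exercise the reset path even on hardware with no reset method implemented.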
+
/**
* i915_reset - reset chip after a hang
* @dev: drm device to reset
- * @flags: reset domains
*
* Reset the chip. Useful if a hang is detected. Returns zero on successful
* reset or otherwise an error code.
@@ -687,14 +849,9 @@ static int gen6_do_reset(struct drm_device *dev, u8 flags)
* - re-init interrupt state
* - re-init display
*/
-int i915_reset(struct drm_device *dev, u8 flags)
+int i915_reset(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- /*
- * We really should only reset the display subsystem if we actually
- * need to
- */
- bool need_display = true;
int ret;
if (!i915_try_reset)
@@ -703,26 +860,16 @@ int i915_reset(struct drm_device *dev, u8 flags)
if (!mutex_trylock(&dev->struct_mutex))
return -EBUSY;
+ dev_priv->stop_rings = 0;
+
i915_gem_reset(dev);
ret = -ENODEV;
- if (get_seconds() - dev_priv->last_gpu_reset < 5) {
+ if (get_seconds() - dev_priv->last_gpu_reset < 5)
DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
- } else switch (INTEL_INFO(dev)->gen) {
- case 7:
- case 6:
- ret = gen6_do_reset(dev, flags);
- break;
- case 5:
- ret = ironlake_do_reset(dev, flags);
- break;
- case 4:
- ret = i965_do_reset(dev, flags);
- break;
- case 2:
- ret = i8xx_do_reset(dev, flags);
- break;
- }
+ else
+ ret = intel_gpu_reset(dev);
+
dev_priv->last_gpu_reset = get_seconds();
if (ret) {
DRM_ERROR("Failed to reset chip.\n");
@@ -746,36 +893,27 @@ int i915_reset(struct drm_device *dev, u8 flags)
*/
if (drm_core_check_feature(dev, DRIVER_MODESET) ||
!dev_priv->mm.suspended) {
+ struct intel_ring_buffer *ring;
+ int i;
+
dev_priv->mm.suspended = 0;
i915_gem_init_swizzling(dev);
- dev_priv->ring[RCS].init(&dev_priv->ring[RCS]);
- if (HAS_BSD(dev))
- dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
- if (HAS_BLT(dev))
- dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);
+ for_each_ring(ring, dev_priv, i)
+ ring->init(ring);
i915_gem_init_ppgtt(dev);
mutex_unlock(&dev->struct_mutex);
- drm_irq_uninstall(dev);
- drm_mode_config_reset(dev);
- drm_irq_install(dev);
- mutex_lock(&dev->struct_mutex);
- }
- mutex_unlock(&dev->struct_mutex);
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ intel_modeset_init_hw(dev);
- /*
- * Perform a full modeset as on later generations, e.g. Ironlake, we may
- * need to retrain the display link and cannot just restore the register
- * values.
- */
- if (need_display) {
- mutex_lock(&dev->mode_config.mutex);
- drm_helper_resume_force_mode(dev);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_irq_uninstall(dev);
+ drm_irq_install(dev);
+ } else {
+ mutex_unlock(&dev->struct_mutex);
}
return 0;
@@ -874,7 +1012,7 @@ static const struct dev_pm_ops i915_pm_ops = {
.restore = i915_pm_resume,
};
-static struct vm_operations_struct i915_gem_vm_ops = {
+static const struct vm_operations_struct i915_gem_vm_ops = {
.fault = i915_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
@@ -901,7 +1039,7 @@ static struct drm_driver driver = {
*/
.driver_features =
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
- DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME,
.load = i915_driver_load,
.unload = i915_driver_unload,
.open = i915_driver_open,
@@ -924,6 +1062,12 @@ static struct drm_driver driver = {
.gem_init_object = i915_gem_init_object,
.gem_free_object = i915_gem_free_object,
.gem_vm_ops = &i915_gem_vm_ops,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = i915_gem_prime_export,
+ .gem_prime_import = i915_gem_prime_import,
+
.dumb_create = i915_gem_dumb_create,
.dumb_map_offset = i915_gem_mmap_gtt,
.dumb_destroy = i915_gem_dumb_destroy,
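
With DRIVER_PRIME set and these four hooks in place, userspace can pass buffers between devices as dma-buf file descriptors. A minimal sketch of the generic ioctl side, using only the standard PRIME UAPI (fd handling and error paths elided):

#include <stdint.h>
#include <xf86drm.h>	/* drmIoctl() */
#include <drm/drm.h>	/* struct drm_prime_handle */

/* Export a GEM handle as a dma-buf fd for another driver to import. */
static int gem_handle_to_prime_fd(int drm_fd, uint32_t handle)
{
	struct drm_prime_handle args = {
		.handle = handle,
		.flags = DRM_CLOEXEC,
	};

	if (drmIoctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args))
		return -1;
	return args.fd;
}

The importing side is the mirror image through DRM_IOCTL_PRIME_FD_TO_HANDLE.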
@@ -993,6 +1137,13 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
+/* We give fast paths for the really cool registers */
+#define NEEDS_FORCE_WAKE(dev_priv, reg) \
+	((((dev_priv)->info->gen >= 6) && \
+	  ((reg) < 0x40000) && \
+	  ((reg) != FORCEWAKE)) && \
+	 (!IS_VALLEYVIEW((dev_priv)->dev)))
+
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
u##x val = 0; \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5fabc6c31fe..377c21f531e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -38,6 +38,8 @@
#include <linux/i2c-algo-bit.h>
#include <drm/intel-gtt.h>
#include <linux/backlight.h>
+#include <linux/intel-iommu.h>
+#include <linux/kref.h>
/* General customization:
*/
@@ -63,10 +65,30 @@ enum plane {
};
#define plane_name(p) ((p) + 'A')
+enum port {
+ PORT_A = 0,
+ PORT_B,
+ PORT_C,
+ PORT_D,
+ PORT_E,
+ I915_MAX_PORTS
+};
+#define port_name(p) ((p) + 'A')
+
#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
+struct intel_pch_pll {
+ int refcount; /* count of number of CRTCs sharing this PLL */
+ int active; /* count of number of active CRTCs (i.e. DPMS on) */
+ bool on; /* is the PLL actually active? Disabled during modeset */
+ int pll_reg;
+ int fp0_reg;
+ int fp1_reg;
+};
+#define I915_NUM_PLLS 2
+
/* Interface history:
*
* 1.1: Original.
@@ -111,11 +133,11 @@ struct opregion_asle;
struct drm_i915_private;
struct intel_opregion {
- struct opregion_header *header;
- struct opregion_acpi *acpi;
- struct opregion_swsci *swsci;
- struct opregion_asle *asle;
- void *vbt;
+ struct opregion_header __iomem *header;
+ struct opregion_acpi __iomem *acpi;
+ struct opregion_swsci __iomem *swsci;
+ struct opregion_asle __iomem *asle;
+ void __iomem *vbt;
u32 __iomem *lid_state;
};
#define OPREGION_SIZE (8*1024)
@@ -135,7 +157,6 @@ struct drm_i915_master_private {
struct drm_i915_fence_reg {
struct list_head lru_list;
struct drm_i915_gem_object *obj;
- uint32_t setup_seqno;
int pin_count;
};
@@ -151,8 +172,11 @@ struct sdvo_device_mapping {
struct intel_display_error_state;
struct drm_i915_error_state {
+ struct kref ref;
u32 eir;
u32 pgtbl_er;
+ u32 ier;
+ bool waiting[I915_NUM_RINGS];
u32 pipestat[I915_MAX_PIPES];
u32 tail[I915_NUM_RINGS];
u32 head[I915_NUM_RINGS];
@@ -218,11 +242,15 @@ struct drm_i915_display_funcs {
void (*update_wm)(struct drm_device *dev);
void (*update_sprite_wm)(struct drm_device *dev, int pipe,
uint32_t sprite_width, int pixel_size);
+ void (*sanitize_pm)(struct drm_device *dev);
+ void (*update_linetime_wm)(struct drm_device *dev, int pipe,
+ struct drm_display_mode *mode);
int (*crtc_mode_set)(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *old_fb);
+ void (*off)(struct drm_crtc *crtc);
void (*write_eld)(struct drm_connector *connector,
struct drm_crtc *crtc);
void (*fdi_link_train)(struct drm_crtc *crtc);
@@ -255,6 +283,9 @@ struct intel_device_info {
u8 is_broadwater:1;
u8 is_crestline:1;
u8 is_ivybridge:1;
+ u8 is_valleyview:1;
+ u8 has_pch_split:1;
+ u8 is_haswell:1;
u8 has_fbc:1;
u8 has_pipe_cxsr:1;
u8 has_hotplug:1;
@@ -291,10 +322,12 @@ enum no_fbc_reason {
enum intel_pch {
PCH_IBX, /* Ibexpeak PCH */
PCH_CPT, /* Cougarpoint PCH */
+ PCH_LPT, /* Lynxpoint PCH */
};
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
+#define QUIRK_INVERT_BRIGHTNESS (1<<2)
struct intel_fbdev;
struct intel_fbc_work;
@@ -302,7 +335,6 @@ struct intel_fbc_work;
struct intel_gmbus {
struct i2c_adapter adapter;
bool force_bit;
- bool has_gpio;
u32 reg0;
u32 gpio_reg;
struct i2c_algo_bit_data bit_algo;
@@ -314,7 +346,6 @@ typedef struct drm_i915_private {
const struct intel_device_info *info;
- int has_gem;
int relative_constants_mode;
void __iomem *regs;
@@ -326,19 +357,23 @@ typedef struct drm_i915_private {
/** gt_lock is also taken in irq contexts. */
struct spinlock gt_lock;
- struct intel_gmbus *gmbus;
+ struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
/** gmbus_mutex protects against concurrent usage of the single hw gmbus
* controller on different i2c buses. */
struct mutex gmbus_mutex;
+ /**
+ * Base address of the gmbus and gpio block.
+ */
+ uint32_t gpio_mmio_base;
+
struct pci_dev *bridge_dev;
struct intel_ring_buffer ring[I915_NUM_RINGS];
uint32_t next_seqno;
drm_dma_handle_t *status_page_dmah;
uint32_t counter;
- drm_local_map_t hws_map;
struct drm_i915_gem_object *pwrctx;
struct drm_i915_gem_object *renderctx;
@@ -354,6 +389,10 @@ typedef struct drm_i915_private {
/* protects the irq masks */
spinlock_t irq_lock;
+
+ /* DPIO indirect register protection */
+ spinlock_t dpio_lock;
+
/** Cached value of IMR to avoid reads in updating the bitfield */
u32 pipestat[2];
u32 irq_mask;
@@ -363,22 +402,20 @@ typedef struct drm_i915_private {
u32 hotplug_supported_mask;
struct work_struct hotplug_work;
- int tex_lru_log_granularity;
- int allow_batchbuffer;
unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
- int vblank_pipe;
int num_pipe;
+ int num_pch_pll;
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
struct timer_list hangcheck_timer;
int hangcheck_count;
- uint32_t last_acthd;
- uint32_t last_acthd_bsd;
- uint32_t last_acthd_blt;
+ uint32_t last_acthd[I915_NUM_RINGS];
uint32_t last_instdone;
uint32_t last_instdone1;
+ unsigned int stop_rings;
+
unsigned long cfb_size;
unsigned int cfb_fb;
enum plane cfb_plane;
@@ -405,6 +442,8 @@ typedef struct drm_i915_private {
unsigned int lvds_use_ssc:1;
unsigned int display_clock_mode:1;
int lvds_ssc_freq;
+ unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
+ unsigned int lvds_val; /* used for checking LVDS channel mode */
struct {
int rate;
int lanes;
@@ -428,6 +467,7 @@ typedef struct drm_i915_private {
unsigned int fsb_freq, mem_freq, is_ddr3;
spinlock_t error_lock;
+ /* Protected by dev->error_lock. */
struct drm_i915_error_state *first_error;
struct work_struct error_work;
struct completion error_completion;
@@ -652,24 +692,10 @@ typedef struct drm_i915_private {
*/
struct list_head inactive_list;
- /**
- * LRU list of objects which are not in the ringbuffer but
- * are still pinned in the GTT.
- */
- struct list_head pinned_list;
-
/** LRU list of objects with fence regs on them. */
struct list_head fence_list;
/**
- * List of objects currently pending being freed.
- *
- * These objects are no longer in use, but due to a signal
- * we were prevented from freeing them at the appointed time.
- */
- struct list_head deferred_free_list;
-
- /**
* We leave the user IRQ off as much as possible,
* but this means that requests will finish and never
* be retired once the system goes idle. Set a timer to
@@ -717,6 +743,16 @@ typedef struct drm_i915_private {
size_t object_memory;
u32 object_count;
} mm;
+
+ /* Old dri1 support infrastructure, beware the dragons ya fools entering
+ * here! */
+ struct {
+ unsigned allow_batchbuffer : 1;
+ u32 __iomem *gfx_hws_cpu_addr;
+ } dri1;
+
+ /* Kernel Modesetting */
+
struct sdvo_device_mapping sdvo_mappings[2];
/* indicate whether the LVDS_BORDER should be enabled or not */
unsigned int lvds_border_bits;
@@ -726,7 +762,8 @@ typedef struct drm_i915_private {
struct drm_crtc *plane_to_crtc_mapping[3];
struct drm_crtc *pipe_to_crtc_mapping[3];
wait_queue_head_t pending_flip_queue;
- bool flip_pending_is_done;
+
+ struct intel_pch_pll pch_plls[I915_NUM_PLLS];
/* Reclocking support */
bool render_reclock_avail;
@@ -781,6 +818,11 @@ typedef struct drm_i915_private {
struct drm_property *force_audio_property;
} drm_i915_private_t;
+/* Iterate over initialised rings */
+#define for_each_ring(ring__, dev_priv__, i__) \
+ for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
+ if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
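
A usage sketch: because intel_ring_initialized() filters the walk, callers no longer need per-ring HAS_BSD()/HAS_BLT() guards, as in the reset path in i915_drv.c above:

	struct intel_ring_buffer *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		ring->init(ring);	/* re-init every live ring */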
+
enum hdmi_force_audio {
HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
HDMI_AUDIO_OFF, /* force turn off HDMI audio */
@@ -844,7 +886,14 @@ struct drm_i915_gem_object {
* Current tiling mode for the object.
*/
unsigned int tiling_mode:2;
- unsigned int tiling_changed:1;
+ /**
+ * Whether the tiling parameters for the currently associated fence
+ * register have changed. Note that for the purposes of tracking
+ * tiling changes we also treat the unfenced register, the register
+ * slot that the object occupies whilst it executes a fenced
+ * command (such as BLT on gen2/3), as a "fence".
+ */
+ unsigned int fence_dirty:1;
/** How many users have pinned this object in GTT space. The following
* users can each hold at most one reference: pwrite/pread, pin_ioctl
@@ -881,6 +930,7 @@ struct drm_i915_gem_object {
unsigned int cache_level:2;
unsigned int has_aliasing_ppgtt_mapping:1;
+ unsigned int has_global_gtt_mapping:1;
struct page **pages;
@@ -890,6 +940,8 @@ struct drm_i915_gem_object {
struct scatterlist *sg_list;
int num_sg;
+ /* prime dma-buf support */
+ struct sg_table *sg_table;
/**
* Used for performing relocations during execbuffer insertion.
*/
@@ -904,13 +956,12 @@ struct drm_i915_gem_object {
*/
uint32_t gtt_offset;
- /** Breadcrumb of last rendering to the buffer. */
- uint32_t last_rendering_seqno;
struct intel_ring_buffer *ring;
+ /** Breadcrumb of last rendering to the buffer. */
+ uint32_t last_rendering_seqno;
/** Breadcrumb of last fenced GPU access to the buffer. */
uint32_t last_fenced_seqno;
- struct intel_ring_buffer *last_fenced_ring;
/** Current tiling stride for the object, if it's tiled. */
uint32_t stride;
@@ -918,13 +969,6 @@ struct drm_i915_gem_object {
/** Record of address bit 17 of each page at last unbind. */
unsigned long *bit_17;
-
- /**
- * If present, while GEM_DOMAIN_CPU is in the read domain this array
- * flags which individual pages are valid.
- */
- uint8_t *page_cpu_valid;
-
/** User space pin count and filp owning the pin */
uint32_t user_pin_count;
struct drm_file *pin_filp;
@@ -1001,6 +1045,8 @@ struct drm_i915_file_private {
#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
+#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
+#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
/*
@@ -1044,10 +1090,11 @@ struct drm_i915_file_private {
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
-#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev))
+#define HAS_PCH_SPLIT(dev) (INTEL_INFO(dev)->has_pch_split)
#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
+#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
@@ -1081,6 +1128,7 @@ extern int i915_panel_ignore_lid __read_mostly;
extern unsigned int i915_powersave __read_mostly;
extern int i915_semaphores __read_mostly;
extern unsigned int i915_lvds_downclock __read_mostly;
+extern int i915_lvds_channel_mode __read_mostly;
extern int i915_panel_use_ssc __read_mostly;
extern int i915_vbt_sdvo_panel_type __read_mostly;
extern int i915_enable_rc6 __read_mostly;
@@ -1094,6 +1142,7 @@ extern int i915_master_create(struct drm_device *dev, struct drm_master *master)
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
/* i915_dma.c */
+void i915_update_dri1_breadcrumb(struct drm_device *dev);
extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
@@ -1104,12 +1153,14 @@ extern void i915_driver_preclose(struct drm_device *dev,
extern void i915_driver_postclose(struct drm_device *dev,
struct drm_file *file_priv);
extern int i915_driver_device_is_agp(struct drm_device * dev);
+#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
+#endif
extern int i915_emit_box(struct drm_device *dev,
struct drm_clip_rect *box,
int DR1, int DR4);
-extern int i915_reset(struct drm_device *dev, u8 flags);
+extern int i915_reset(struct drm_device *dev);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
@@ -1119,19 +1170,10 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
/* i915_irq.c */
void i915_hangcheck_elapsed(unsigned long data);
void i915_handle_error(struct drm_device *dev, bool wedged);
-extern int i915_irq_emit(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int i915_irq_wait(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
extern void intel_irq_init(struct drm_device *dev);
-extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int i915_vblank_swap(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+void i915_error_state_free(struct kref *error_ref);
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -1205,8 +1247,12 @@ int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
+int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
+ gfp_t gfpmask);
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
+int i915_gem_object_sync(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *to);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring,
u32 seqno);
@@ -1229,17 +1275,18 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);
-int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined);
+int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
-static inline void
+static inline bool
i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
{
if (obj->fence_reg != I915_FENCE_REG_NONE) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
dev_priv->fence_regs[obj->fence_reg].pin_count++;
- }
+ return true;
+ } else
+ return false;
}
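
Since the helper now reports whether a fence register was actually pinned, callers can keep the pin balanced; a plausible caller pattern (the surrounding context is illustrative only):

	bool fence_pinned = i915_gem_object_pin_fence(obj);

	/* ... issue work that relies on the fenced GTT mapping ... */

	if (fence_pinned)
		i915_gem_object_unpin_fence(obj);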
static inline void
@@ -1260,27 +1307,25 @@ int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
uint32_t read_domains,
uint32_t write_domain);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_init_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
-void i915_gem_do_init(struct drm_device *dev,
- unsigned long start,
- unsigned long mappable_end,
- unsigned long end);
-int __must_check i915_gpu_idle(struct drm_device *dev, bool do_retire);
+int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_idle(struct drm_device *dev);
int __must_check i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
struct drm_i915_gem_request *request);
int __must_check i915_wait_request(struct intel_ring_buffer *ring,
- uint32_t seqno,
- bool do_retire);
+ uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
bool write);
int __must_check
+i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
+int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
struct intel_ring_buffer *pipelined);
@@ -1301,6 +1346,13 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
+struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf);
+
+struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *gem_obj, int flags);
+
+
/* i915_gem_gtt.c */
int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
@@ -1311,18 +1363,24 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj);
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
-int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
-void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
+int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
+void i915_gem_init_global_gtt(struct drm_device *dev,
+ unsigned long start,
+ unsigned long mappable_end,
+ unsigned long end);
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
unsigned alignment, bool mappable);
-int __must_check i915_gem_evict_everything(struct drm_device *dev,
- bool purgeable_only);
-int __must_check i915_gem_evict_inactive(struct drm_device *dev,
- bool purgeable_only);
+int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only);
+
+/* i915_gem_stolen.c */
+int i915_gem_init_stolen(struct drm_device *dev);
+void i915_gem_cleanup_stolen(struct drm_device *dev);
/* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
@@ -1354,9 +1412,20 @@ extern int i915_restore_state(struct drm_device *dev);
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);
+/* i915_sysfs.c */
+void i915_setup_sysfs(struct drm_device *dev);
+void i915_teardown_sysfs(struct drm_device *dev);
+
/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
+extern inline bool intel_gmbus_is_port_valid(unsigned port)
+{
+ return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
+}
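
Together with intel_gmbus_get_adapter() below, this gives callers a safe lookup pattern; a small sketch (GMBUS_PORT_PANEL stands in for any port number taken from the VBT):

	struct i2c_adapter *adapter = NULL;

	if (intel_gmbus_is_port_valid(GMBUS_PORT_PANEL))
		adapter = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_PANEL);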
+
+extern struct i2c_adapter *intel_gmbus_get_adapter(
+ struct drm_i915_private *dev_priv, unsigned port);
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
@@ -1391,6 +1460,7 @@ static inline void intel_unregister_dsm_handler(void) { return; }
#endif /* CONFIG_ACPI */
/* modesetting */
+extern void intel_modeset_init_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
@@ -1403,12 +1473,17 @@ extern void ironlake_enable_rc6(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
+extern int intel_enable_rc6(const struct drm_device *dev);
+extern bool i915_semaphore_is_enabled(struct drm_device *dev);
extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
+extern void vlv_force_wake_get(struct drm_i915_private *dev_priv);
+extern void vlv_force_wake_put(struct drm_i915_private *dev_priv);
+
/* overlay */
#ifdef CONFIG_DEBUG_FS
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
@@ -1420,28 +1495,6 @@ extern void intel_display_print_error_state(struct seq_file *m,
struct intel_display_error_state *error);
#endif
-#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
-
-#define BEGIN_LP_RING(n) \
- intel_ring_begin(LP_RING(dev_priv), (n))
-
-#define OUT_RING(x) \
- intel_ring_emit(LP_RING(dev_priv), x)
-
-#define ADVANCE_LP_RING() \
- intel_ring_advance(LP_RING(dev_priv))
-
-/**
- * Lock test for when it's just for synchronization of ring access.
- *
- * In that case, we don't need to do it when GEM is initialized as nobody else
- * has access to the ring.
- */
-#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
- if (LP_RING(dev->dev_private)->obj == NULL) \
- LOCK_TEST_WITH_RETURN(dev, file); \
-} while (0)
-
/* On SNB platform, before reading ring registers forcewake bit
* must be set to prevent GT core from power down and stale values being
* returned.
@@ -1450,12 +1503,6 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
-/* We give fast paths for the really cool registers */
-#define NEEDS_FORCE_WAKE(dev_priv, reg) \
- (((dev_priv)->info->gen >= 6) && \
- ((reg) < 0x40000) && \
- ((reg) != FORCEWAKE))
-
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0d1e4b7b4b9..c1e5c66553d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -35,31 +35,41 @@
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
+#include <linux/dma-buf.h>
static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
-static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
- bool write);
-static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
- uint64_t offset,
- uint64_t size);
-static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
unsigned alignment,
bool map_and_fenceable);
-static void i915_gem_clear_fence_reg(struct drm_device *dev,
- struct drm_i915_fence_reg *reg);
static int i915_gem_phys_pwrite(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file);
-static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
+
+static void i915_gem_write_fence(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj);
+static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
+ struct drm_i915_fence_reg *fence,
+ bool enable);
static int i915_gem_inactive_shrink(struct shrinker *shrinker,
struct shrink_control *sc);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
+static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
+{
+ if (obj->tiling_mode)
+ i915_gem_release_mmap(obj);
+
+ /* As we do not have an associated fence register, we will force
+ * a tiling change if we ever need to acquire one.
+ */
+ obj->fence_dirty = false;
+ obj->fence_reg = I915_FENCE_REG_NONE;
+}
+
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
size_t size)
@@ -122,26 +132,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
- return obj->gtt_space && !obj->active && obj->pin_count == 0;
-}
-
-void i915_gem_do_init(struct drm_device *dev,
- unsigned long start,
- unsigned long mappable_end,
- unsigned long end)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
-
- dev_priv->mm.gtt_start = start;
- dev_priv->mm.gtt_mappable_end = mappable_end;
- dev_priv->mm.gtt_end = end;
- dev_priv->mm.gtt_total = end - start;
- dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
-
- /* Take over this portion of the GTT */
- intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
+ return !obj->active;
}
int
@@ -150,12 +141,20 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_init *args = data;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
if (args->gtt_start >= args->gtt_end ||
(args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
return -EINVAL;
+ /* GEM with user mode setting was never supported on ilk and later. */
+ if (INTEL_INFO(dev)->gen >= 5)
+ return -ENODEV;
+
mutex_lock(&dev->struct_mutex);
- i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
+ i915_gem_init_global_gtt(dev, args->gtt_start,
+ args->gtt_end, args->gtt_end);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -170,13 +169,11 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
size_t pinned;
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
pinned = 0;
mutex_lock(&dev->struct_mutex);
- list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
- pinned += obj->gtt_space->size;
+ list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
+ if (obj->pin_count)
+ pinned += obj->gtt_space->size;
mutex_unlock(&dev->struct_mutex);
args->aper_size = dev_priv->mm.gtt_total;
@@ -247,6 +244,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_create *args = data;
+
return i915_gem_create(file, dev,
args->size, &args->handle);
}
@@ -259,66 +257,6 @@ static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
obj->tiling_mode != I915_TILING_NONE;
}
-/**
- * This is the fast shmem pread path, which attempts to copy_from_user directly
- * from the backing pages of the object to the user's address space. On a
- * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow().
- */
-static int
-i915_gem_shmem_pread_fast(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pread *args,
- struct drm_file *file)
-{
- struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
- ssize_t remain;
- loff_t offset;
- char __user *user_data;
- int page_offset, page_length;
-
- user_data = (char __user *) (uintptr_t) args->data_ptr;
- remain = args->size;
-
- offset = args->offset;
-
- while (remain > 0) {
- struct page *page;
- char *vaddr;
- int ret;
-
- /* Operation in this page
- *
- * page_offset = offset within page
- * page_length = bytes to copy for this page
- */
- page_offset = offset_in_page(offset);
- page_length = remain;
- if ((page_offset + remain) > PAGE_SIZE)
- page_length = PAGE_SIZE - page_offset;
-
- page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
- if (IS_ERR(page))
- return PTR_ERR(page);
-
- vaddr = kmap_atomic(page);
- ret = __copy_to_user_inatomic(user_data,
- vaddr + page_offset,
- page_length);
- kunmap_atomic(vaddr);
-
- mark_page_accessed(page);
- page_cache_release(page);
- if (ret)
- return -EFAULT;
-
- remain -= page_length;
- user_data += page_length;
- offset += page_length;
- }
-
- return 0;
-}
-
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
const char *gpu_vaddr, int gpu_offset,
@@ -346,8 +284,8 @@ __copy_to_user_swizzled(char __user *cpu_vaddr,
}
static inline int
-__copy_from_user_swizzled(char __user *gpu_vaddr, int gpu_offset,
- const char *cpu_vaddr,
+__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
+ const char __user *cpu_vaddr,
int length)
{
int ret, cpu_offset = 0;
@@ -371,37 +309,121 @@ __copy_from_user_swizzled(char __user *gpu_vaddr, int gpu_offset,
return 0;
}
-/**
- * This is the fallback shmem pread path, which allocates temporary storage
- * in kernel space to copy_to_user into outside of the struct_mutex, so we
- * can copy out of the object's backing pages while holding the struct mutex
- * and not take page faults.
- */
+/* Per-page copy function for the shmem pread fastpath.
+ * Flushes invalid cachelines before reading the target if
+ * needs_clflush is set. */
static int
-i915_gem_shmem_pread_slow(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pread *args,
- struct drm_file *file)
+shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
+ char __user *user_data,
+ bool page_do_bit17_swizzling, bool needs_clflush)
+{
+ char *vaddr;
+ int ret;
+
+ if (unlikely(page_do_bit17_swizzling))
+ return -EINVAL;
+
+ vaddr = kmap_atomic(page);
+ if (needs_clflush)
+ drm_clflush_virt_range(vaddr + shmem_page_offset,
+ page_length);
+ ret = __copy_to_user_inatomic(user_data,
+ vaddr + shmem_page_offset,
+ page_length);
+ kunmap_atomic(vaddr);
+
+ return ret;
+}
+
+static void
+shmem_clflush_swizzled_range(char *addr, unsigned long length,
+ bool swizzled)
+{
+ if (unlikely(swizzled)) {
+ unsigned long start = (unsigned long) addr;
+ unsigned long end = (unsigned long) addr + length;
+
+ /* For swizzling simply ensure that we always flush both
+ * channels. Lame, but simple and it works. Swizzled
+ * pwrite/pread is far from a hotpath - current userspace
+ * doesn't use it at all. */
+ start = round_down(start, 128);
+ end = round_up(end, 128);
+
+ drm_clflush_virt_range((void *)start, end - start);
+ } else {
+ drm_clflush_virt_range(addr, length);
+ }
+}
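
A worked example of the widening, for a 16-byte flush starting 0x1040 bytes into a swizzled mapping:

	start = round_down(0x1040, 128) = 0x1000
	end   = round_up(0x1040 + 16, 128) = 0x1080

i.e. the flush grows to the whole 128-byte pair, covering both 64-byte halves that bit-17 swizzling can swap between memory channels.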
+
+/* Only difference to the fast-path function is that this can handle bit17
+ * and uses non-atomic copy and kmap functions. */
+static int
+shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
+ char __user *user_data,
+ bool page_do_bit17_swizzling, bool needs_clflush)
+{
+ char *vaddr;
+ int ret;
+
+ vaddr = kmap(page);
+ if (needs_clflush)
+ shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
+ page_length,
+ page_do_bit17_swizzling);
+
+ if (page_do_bit17_swizzling)
+ ret = __copy_to_user_swizzled(user_data,
+ vaddr, shmem_page_offset,
+ page_length);
+ else
+ ret = __copy_to_user(user_data,
+ vaddr + shmem_page_offset,
+ page_length);
+ kunmap(page);
+
+ return ret;
+}
+
+static int
+i915_gem_shmem_pread(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_pread *args,
+ struct drm_file *file)
{
struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
char __user *user_data;
ssize_t remain;
loff_t offset;
- int shmem_page_offset, page_length, ret;
+ int shmem_page_offset, page_length, ret = 0;
int obj_do_bit17_swizzling, page_do_bit17_swizzling;
+ int hit_slowpath = 0;
+ int prefaulted = 0;
+ int needs_clflush = 0;
+ int release_page;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
- offset = args->offset;
+ if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
+		/* If we're not in the cpu read domain, set ourselves into the gtt
+		 * read domain and manually flush cachelines (if required). This
+ * optimizes for the case when the gpu will dirty the data
+ * anyway again before the next pread happens. */
+ if (obj->cache_level == I915_CACHE_NONE)
+ needs_clflush = 1;
+ ret = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (ret)
+ return ret;
+ }
- mutex_unlock(&dev->struct_mutex);
+ offset = args->offset;
while (remain > 0) {
struct page *page;
- char *vaddr;
/* Operation in this page
*
@@ -413,28 +435,51 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
if ((shmem_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - shmem_page_offset;
- page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
- goto out;
+ if (obj->pages) {
+ page = obj->pages[offset >> PAGE_SHIFT];
+ release_page = 0;
+ } else {
+ page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
+ if (IS_ERR(page)) {
+ ret = PTR_ERR(page);
+ goto out;
+ }
+ release_page = 1;
}
page_do_bit17_swizzling = obj_do_bit17_swizzling &&
(page_to_phys(page) & (1 << 17)) != 0;
- vaddr = kmap(page);
- if (page_do_bit17_swizzling)
- ret = __copy_to_user_swizzled(user_data,
- vaddr, shmem_page_offset,
- page_length);
- else
- ret = __copy_to_user(user_data,
- vaddr + shmem_page_offset,
- page_length);
- kunmap(page);
+ ret = shmem_pread_fast(page, shmem_page_offset, page_length,
+ user_data, page_do_bit17_swizzling,
+ needs_clflush);
+ if (ret == 0)
+ goto next_page;
- mark_page_accessed(page);
+ hit_slowpath = 1;
+ page_cache_get(page);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (!prefaulted) {
+ ret = fault_in_multipages_writeable(user_data, remain);
+ /* Userspace is tricking us, but we've already clobbered
+ * its pages with the prefault and promised to write the
+ * data up to the first fault. Hence ignore any errors
+ * and just continue. */
+ (void)ret;
+ prefaulted = 1;
+ }
+
+ ret = shmem_pread_slow(page, shmem_page_offset, page_length,
+ user_data, page_do_bit17_swizzling,
+ needs_clflush);
+
+ mutex_lock(&dev->struct_mutex);
page_cache_release(page);
+next_page:
+ mark_page_accessed(page);
+ if (release_page)
+ page_cache_release(page);
if (ret) {
ret = -EFAULT;
@@ -447,10 +492,11 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
}
out:
- mutex_lock(&dev->struct_mutex);
- /* Fixup: Kill any reinstated backing storage pages */
- if (obj->madv == __I915_MADV_PURGED)
- i915_gem_object_truncate(obj);
+ if (hit_slowpath) {
+ /* Fixup: Kill any reinstated backing storage pages */
+ if (obj->madv == __I915_MADV_PURGED)
+ i915_gem_object_truncate(obj);
+ }
return ret;
}
@@ -476,11 +522,6 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
args->size))
return -EFAULT;
- ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
- args->size);
- if (ret)
- return -EFAULT;
-
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
@@ -498,19 +539,17 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
goto out;
}
- trace_i915_gem_object_pread(obj, args->offset, args->size);
-
- ret = i915_gem_object_set_cpu_read_domain_range(obj,
- args->offset,
- args->size);
- if (ret)
+ /* prime objects have no backing filp to GEM pread/pwrite
+ * pages from.
+ */
+ if (!obj->base.filp) {
+ ret = -EINVAL;
goto out;
+ }
- ret = -EFAULT;
- if (!i915_gem_object_needs_bit17_swizzle(obj))
- ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
- if (ret == -EFAULT)
- ret = i915_gem_shmem_pread_slow(dev, obj, args, file);
+ trace_i915_gem_object_pread(obj, args->offset, args->size);
+
+ ret = i915_gem_shmem_pread(dev, obj, args, file);
out:
drm_gem_object_unreference(&obj->base);
@@ -529,40 +568,19 @@ fast_user_write(struct io_mapping *mapping,
char __user *user_data,
int length)
{
- char *vaddr_atomic;
+ void __iomem *vaddr_atomic;
+ void *vaddr;
unsigned long unwritten;
vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
- unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
+ /* We can use the cpu mem copy function because this is X86. */
+ vaddr = (void __force*)vaddr_atomic + page_offset;
+ unwritten = __copy_from_user_inatomic_nocache(vaddr,
user_data, length);
io_mapping_unmap_atomic(vaddr_atomic);
return unwritten;
}
-/* Here's the write path which can sleep for
- * page faults
- */
-
-static inline void
-slow_kernel_write(struct io_mapping *mapping,
- loff_t gtt_base, int gtt_offset,
- struct page *user_page, int user_offset,
- int length)
-{
- char __iomem *dst_vaddr;
- char *src_vaddr;
-
- dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
- src_vaddr = kmap(user_page);
-
- memcpy_toio(dst_vaddr + gtt_offset,
- src_vaddr + user_offset,
- length);
-
- kunmap(user_page);
- io_mapping_unmap(dst_vaddr);
-}
-
/**
* This is the fast pwrite path, where we copy the data directly from the
* user into the GTT, uncached.
@@ -577,7 +595,19 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
ssize_t remain;
loff_t offset, page_base;
char __user *user_data;
- int page_offset, page_length;
+ int page_offset, page_length, ret;
+
+ ret = i915_gem_object_pin(obj, 0, true);
+ if (ret)
+ goto out;
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ goto out_unpin;
+
+ ret = i915_gem_object_put_fence(obj);
+ if (ret)
+ goto out_unpin;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
@@ -602,214 +632,133 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
* retry in the slow path.
*/
if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
- page_offset, user_data, page_length))
- return -EFAULT;
+ page_offset, user_data, page_length)) {
+ ret = -EFAULT;
+ goto out_unpin;
+ }
remain -= page_length;
user_data += page_length;
offset += page_length;
}
- return 0;
+out_unpin:
+ i915_gem_object_unpin(obj);
+out:
+ return ret;
}
-/**
- * This is the fallback GTT pwrite path, which uses get_user_pages to pin
- * the memory and maps it using kmap_atomic for copying.
- *
- * This code resulted in x11perf -rgb10text consuming about 10% more CPU
- * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
- */
+/* Per-page copy function for the shmem pwrite fastpath.
+ * Flushes invalid cachelines before writing to the target if
+ * needs_clflush_before is set and flushes out any written cachelines after
+ * writing if needs_clflush is set. */
static int
-i915_gem_gtt_pwrite_slow(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file)
+shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
+ char __user *user_data,
+ bool page_do_bit17_swizzling,
+ bool needs_clflush_before,
+ bool needs_clflush_after)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- ssize_t remain;
- loff_t gtt_page_base, offset;
- loff_t first_data_page, last_data_page, num_pages;
- loff_t pinned_pages, i;
- struct page **user_pages;
- struct mm_struct *mm = current->mm;
- int gtt_page_offset, data_page_offset, data_page_index, page_length;
+ char *vaddr;
int ret;
- uint64_t data_ptr = args->data_ptr;
-
- remain = args->size;
-
- /* Pin the user pages containing the data. We can't fault while
- * holding the struct mutex, and all of the pwrite implementations
- * want to hold it while dereferencing the user data.
- */
- first_data_page = data_ptr / PAGE_SIZE;
- last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
- num_pages = last_data_page - first_data_page + 1;
-
- user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
- if (user_pages == NULL)
- return -ENOMEM;
-
- mutex_unlock(&dev->struct_mutex);
- down_read(&mm->mmap_sem);
- pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
- num_pages, 0, 0, user_pages, NULL);
- up_read(&mm->mmap_sem);
- mutex_lock(&dev->struct_mutex);
- if (pinned_pages < num_pages) {
- ret = -EFAULT;
- goto out_unpin_pages;
- }
-
- ret = i915_gem_object_set_to_gtt_domain(obj, true);
- if (ret)
- goto out_unpin_pages;
-
- ret = i915_gem_object_put_fence(obj);
- if (ret)
- goto out_unpin_pages;
-
- offset = obj->gtt_offset + args->offset;
-
- while (remain > 0) {
- /* Operation in this page
- *
- * gtt_page_base = page offset within aperture
- * gtt_page_offset = offset within page in aperture
- * data_page_index = page number in get_user_pages return
- * data_page_offset = offset with data_page_index page.
- * page_length = bytes to copy for this page
- */
- gtt_page_base = offset & PAGE_MASK;
- gtt_page_offset = offset_in_page(offset);
- data_page_index = data_ptr / PAGE_SIZE - first_data_page;
- data_page_offset = offset_in_page(data_ptr);
-
- page_length = remain;
- if ((gtt_page_offset + page_length) > PAGE_SIZE)
- page_length = PAGE_SIZE - gtt_page_offset;
- if ((data_page_offset + page_length) > PAGE_SIZE)
- page_length = PAGE_SIZE - data_page_offset;
- slow_kernel_write(dev_priv->mm.gtt_mapping,
- gtt_page_base, gtt_page_offset,
- user_pages[data_page_index],
- data_page_offset,
- page_length);
-
- remain -= page_length;
- offset += page_length;
- data_ptr += page_length;
- }
+ if (unlikely(page_do_bit17_swizzling))
+ return -EINVAL;
-out_unpin_pages:
- for (i = 0; i < pinned_pages; i++)
- page_cache_release(user_pages[i]);
- drm_free_large(user_pages);
+ vaddr = kmap_atomic(page);
+ if (needs_clflush_before)
+ drm_clflush_virt_range(vaddr + shmem_page_offset,
+ page_length);
+ ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
+ user_data,
+ page_length);
+ if (needs_clflush_after)
+ drm_clflush_virt_range(vaddr + shmem_page_offset,
+ page_length);
+ kunmap_atomic(vaddr);
return ret;
}
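/* Editor's sketch (not part of the patch): the partial-cacheline test used
 * by i915_gem_shmem_pwrite() below boils down to a single mask. A write
 * needs a clflush beforehand only when it does not cover whole cachelines,
 * that is, when its start offset or its length has low bits set:
 */
static inline bool covers_whole_cachelines(int offset, int len,
					   int clflush_size)
{
	/* clflush_size is a power of two, typically 64 on x86 */
	return ((offset | len) & (clflush_size - 1)) == 0;
}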
-/**
- * This is the fast shmem pwrite path, which attempts to directly
- * copy_from_user into the kmapped pages backing the object.
- */
+/* The only difference from the fast-path function is that this one can
+ * handle bit17 swizzling and uses non-atomic copy and kmap functions. */
static int
-i915_gem_shmem_pwrite_fast(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file)
+shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
+ char __user *user_data,
+ bool page_do_bit17_swizzling,
+ bool needs_clflush_before,
+ bool needs_clflush_after)
{
- struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
- ssize_t remain;
- loff_t offset;
- char __user *user_data;
- int page_offset, page_length;
-
- user_data = (char __user *) (uintptr_t) args->data_ptr;
- remain = args->size;
-
- offset = args->offset;
- obj->dirty = 1;
-
- while (remain > 0) {
- struct page *page;
- char *vaddr;
- int ret;
-
- /* Operation in this page
- *
- * page_offset = offset within page
- * page_length = bytes to copy for this page
- */
- page_offset = offset_in_page(offset);
- page_length = remain;
- if ((page_offset + remain) > PAGE_SIZE)
- page_length = PAGE_SIZE - page_offset;
-
- page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
- if (IS_ERR(page))
- return PTR_ERR(page);
+ char *vaddr;
+ int ret;
- vaddr = kmap_atomic(page);
- ret = __copy_from_user_inatomic(vaddr + page_offset,
+ vaddr = kmap(page);
+ if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
+ shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
+ page_length,
+ page_do_bit17_swizzling);
+ if (page_do_bit17_swizzling)
+ ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
user_data,
page_length);
- kunmap_atomic(vaddr);
-
- set_page_dirty(page);
- mark_page_accessed(page);
- page_cache_release(page);
-
- /* If we get a fault while copying data, then (presumably) our
- * source page isn't available. Return the error and we'll
- * retry in the slow path.
- */
- if (ret)
- return -EFAULT;
-
- remain -= page_length;
- user_data += page_length;
- offset += page_length;
- }
+ else
+ ret = __copy_from_user(vaddr + shmem_page_offset,
+ user_data,
+ page_length);
+ if (needs_clflush_after)
+ shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
+ page_length,
+ page_do_bit17_swizzling);
+ kunmap(page);
- return 0;
+ return ret;
}
-/**
- * This is the fallback shmem pwrite path, which uses get_user_pages to pin
- * the memory and maps it using kmap_atomic for copying.
- *
- * This avoids taking mmap_sem for faulting on the user's address while the
- * struct_mutex is held.
- */
static int
-i915_gem_shmem_pwrite_slow(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file)
+i915_gem_shmem_pwrite(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_pwrite *args,
+ struct drm_file *file)
{
struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
ssize_t remain;
loff_t offset;
char __user *user_data;
- int shmem_page_offset, page_length, ret;
+ int shmem_page_offset, page_length, ret = 0;
int obj_do_bit17_swizzling, page_do_bit17_swizzling;
+ int hit_slowpath = 0;
+ int needs_clflush_after = 0;
+ int needs_clflush_before = 0;
+ int release_page;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+ if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+ /* If we're not in the cpu write domain, put the object into the
+ * gtt write domain and manually flush cachelines (if required). This
+ * optimizes for the case when the gpu will use the data
+ * right away and we therefore have to clflush anyway. */
+ if (obj->cache_level == I915_CACHE_NONE)
+ needs_clflush_after = 1;
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ return ret;
+ }
+ /* The same trick applies to invalidating partially written
+ * cachelines before writing. */
+ if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
+ && obj->cache_level == I915_CACHE_NONE)
+ needs_clflush_before = 1;
+
offset = args->offset;
obj->dirty = 1;
- mutex_unlock(&dev->struct_mutex);
-
while (remain > 0) {
struct page *page;
- char *vaddr;
+ int partial_cacheline_write;
/* Operation in this page
*
@@ -822,29 +771,51 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
if ((shmem_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - shmem_page_offset;
- page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
- goto out;
+ /* If we don't overwrite a cacheline completely we need to be
+ * careful to have up-to-date data by first clflushing. Don't
+ * overcomplicate things and flush the entire page. */
+ partial_cacheline_write = needs_clflush_before &&
+ ((shmem_page_offset | page_length)
+ & (boot_cpu_data.x86_clflush_size - 1));
+
+ if (obj->pages) {
+ page = obj->pages[offset >> PAGE_SHIFT];
+ release_page = 0;
+ } else {
+ page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
+ if (IS_ERR(page)) {
+ ret = PTR_ERR(page);
+ goto out;
+ }
+ release_page = 1;
}
page_do_bit17_swizzling = obj_do_bit17_swizzling &&
(page_to_phys(page) & (1 << 17)) != 0;
- vaddr = kmap(page);
- if (page_do_bit17_swizzling)
- ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
- user_data,
- page_length);
- else
- ret = __copy_from_user(vaddr + shmem_page_offset,
- user_data,
- page_length);
- kunmap(page);
+ ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
+ user_data, page_do_bit17_swizzling,
+ partial_cacheline_write,
+ needs_clflush_after);
+ if (ret == 0)
+ goto next_page;
+
+ hit_slowpath = 1;
+ page_cache_get(page);
+ mutex_unlock(&dev->struct_mutex);
+
+ ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
+ user_data, page_do_bit17_swizzling,
+ partial_cacheline_write,
+ needs_clflush_after);
+ mutex_lock(&dev->struct_mutex);
+ page_cache_release(page);
+next_page:
set_page_dirty(page);
mark_page_accessed(page);
- page_cache_release(page);
+ if (release_page)
+ page_cache_release(page);
if (ret) {
ret = -EFAULT;
@@ -857,17 +828,21 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
}
out:
- mutex_lock(&dev->struct_mutex);
- /* Fixup: Kill any reinstated backing storage pages */
- if (obj->madv == __I915_MADV_PURGED)
- i915_gem_object_truncate(obj);
- /* and flush dirty cachelines in case the object isn't in the cpu write
- * domain anymore. */
- if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
- i915_gem_clflush_object(obj);
- intel_gtt_chipset_flush();
+ if (hit_slowpath) {
+ /* Fixup: Kill any reinstated backing storage pages */
+ if (obj->madv == __I915_MADV_PURGED)
+ i915_gem_object_truncate(obj);
+ /* and flush dirty cachelines in case the object isn't in the cpu write
+ * domain anymore. */
+ if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+ i915_gem_clflush_object(obj);
+ intel_gtt_chipset_flush();
+ }
}
+ if (needs_clflush_after)
+ intel_gtt_chipset_flush();
+
return ret;
}
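/* Editor's sketch (hypothetical helper, not in the patch): the loop above is
 * an instance of the optimistic-copy pattern. Try the atomic fastpath while
 * holding struct_mutex; only on a fault pin the page, drop the lock and
 * retry with the sleeping slowpath. Reduced to its bare shape:
 */
static int pwrite_one_page(struct drm_device *dev, struct page *page,
			   int offset, int len, char __user *user_data)
{
	int ret;

	/* fastpath: kmap_atomic copy, must not fault under struct_mutex */
	ret = shmem_pwrite_fast(page, offset, len, user_data,
				false, false, false);
	if (ret == 0)
		return 0;

	/* slowpath: hold a page reference so the page cannot vanish while
	 * the mutex is dropped and the copy is allowed to fault */
	page_cache_get(page);
	mutex_unlock(&dev->struct_mutex);
	ret = shmem_pwrite_slow(page, offset, len, user_data,
				false, false, false);
	mutex_lock(&dev->struct_mutex);
	page_cache_release(page);

	return ret;
}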
@@ -892,8 +867,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
args->size))
return -EFAULT;
- ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
- args->size);
+ ret = fault_in_multipages_readable((char __user *)(uintptr_t)args->data_ptr,
+ args->size);
if (ret)
return -EFAULT;
@@ -914,8 +889,17 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
goto out;
}
+ /* prime objects have no backing filp from which GEM pread/pwrite
+ * could copy pages.
+ */
+ if (!obj->base.filp) {
+ ret = -EINVAL;
+ goto out;
+ }
+
trace_i915_gem_object_pwrite(obj, args->offset, args->size);
+ ret = -EFAULT;
/* We can only do the GTT pwrite on untiled buffers, as otherwise
* it would end up going through the fenced access, and we'll get
* different detiling behavior between reading and writing.
@@ -928,42 +912,18 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
}
if (obj->gtt_space &&
+ obj->cache_level == I915_CACHE_NONE &&
+ obj->tiling_mode == I915_TILING_NONE &&
+ obj->map_and_fenceable &&
obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
- ret = i915_gem_object_pin(obj, 0, true);
- if (ret)
- goto out;
-
- ret = i915_gem_object_set_to_gtt_domain(obj, true);
- if (ret)
- goto out_unpin;
-
- ret = i915_gem_object_put_fence(obj);
- if (ret)
- goto out_unpin;
-
ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
- if (ret == -EFAULT)
- ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);
-
-out_unpin:
- i915_gem_object_unpin(obj);
-
- if (ret != -EFAULT)
- goto out;
- /* Fall through to the shmfs paths because the gtt paths might
- * fail with non-page-backed user pointers (e.g. gtt mappings
- * when moving data between textures). */
+ /* Note that the gtt paths might fail with non-page-backed user
+ * pointers (e.g. gtt mappings when moving data between
+ * textures). Fall back to the shmem path in that case. */
}
- ret = i915_gem_object_set_to_cpu_domain(obj, 1);
- if (ret)
- goto out;
-
- ret = -EFAULT;
- if (!i915_gem_object_needs_bit17_swizzle(obj))
- ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
if (ret == -EFAULT)
- ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
+ ret = i915_gem_shmem_pwrite(dev, obj, args, file);
out:
drm_gem_object_unreference(&obj->base);
@@ -986,9 +946,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
uint32_t write_domain = args->write_domain;
int ret;
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
/* Only handle setting domains to types used by the CPU. */
if (write_domain & I915_GEM_GPU_DOMAINS)
return -EINVAL;
@@ -1042,9 +999,6 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
int ret = 0;
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
@@ -1080,13 +1034,18 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_gem_object *obj;
unsigned long addr;
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
obj = drm_gem_object_lookup(dev, file, args->handle);
if (obj == NULL)
return -ENOENT;
+ /* prime objects have no backing filp for GEM mmap to map
+ * pages from.
+ */
+ if (!obj->filp) {
+ drm_gem_object_unreference_unlocked(obj);
+ return -EINVAL;
+ }
+
addr = vm_mmap(obj->filp, 0, args->size,
PROT_READ | PROT_WRITE, MAP_SHARED,
args->offset);
@@ -1151,10 +1110,10 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
goto unlock;
}
- if (obj->tiling_mode == I915_TILING_NONE)
- ret = i915_gem_object_put_fence(obj);
- else
- ret = i915_gem_object_get_fence(obj, NULL);
+ if (!obj->has_global_gtt_mapping)
+ i915_gem_gtt_bind_object(obj, obj->cache_level);
+
+ ret = i915_gem_object_get_fence(obj);
if (ret)
goto unlock;
@@ -1308,9 +1267,6 @@ i915_gem_mmap_gtt(struct drm_file *file,
struct drm_i915_gem_object *obj;
int ret;
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
@@ -1368,14 +1324,10 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_mmap_gtt *args = data;
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
-
-static int
+int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
gfp_t gfpmask)
{
@@ -1384,6 +1336,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
struct inode *inode;
struct page *page;
+ if (obj->pages || obj->sg_table)
+ return 0;
+
/* Get the list of pages out of our struct file. They'll be pinned
* at this point until we release them.
*/
@@ -1425,6 +1380,9 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
int page_count = obj->base.size / PAGE_SIZE;
int i;
+ if (!obj->pages)
+ return;
+
BUG_ON(obj->madv == __I915_MADV_PURGED);
if (i915_gem_object_needs_bit17_swizzle(obj))
@@ -1473,7 +1431,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
if (obj->fenced_gpu_access) {
obj->last_fenced_seqno = seqno;
- obj->last_fenced_ring = ring;
/* Bump MRU to take account of the delayed flush */
if (obj->fence_reg != I915_FENCE_REG_NONE) {
@@ -1512,15 +1469,11 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- if (obj->pin_count != 0)
- list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
- else
- list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+ list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
BUG_ON(!list_empty(&obj->gpu_write_list));
BUG_ON(!obj->active);
obj->ring = NULL;
- obj->last_fenced_ring = NULL;
i915_gem_object_move_off_active(obj);
obj->fenced_gpu_access = false;
@@ -1546,6 +1499,9 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
inode = obj->base.filp->f_path.dentry->d_inode;
shmem_truncate_range(inode, 0, (loff_t)-1);
+ if (obj->base.map_list.map)
+ drm_gem_free_mmap_offset(&obj->base);
+
obj->madv = __I915_MADV_PURGED;
}
@@ -1711,30 +1667,29 @@ static void i915_gem_reset_fences(struct drm_device *dev)
for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
- struct drm_i915_gem_object *obj = reg->obj;
- if (!obj)
- continue;
+ i915_gem_write_fence(dev, i, NULL);
- if (obj->tiling_mode)
- i915_gem_release_mmap(obj);
+ if (reg->obj)
+ i915_gem_object_fence_lost(reg->obj);
- reg->obj->fence_reg = I915_FENCE_REG_NONE;
- reg->obj->fenced_gpu_access = false;
- reg->obj->last_fenced_seqno = 0;
- reg->obj->last_fenced_ring = NULL;
- i915_gem_clear_fence_reg(dev, reg);
+ reg->pin_count = 0;
+ reg->obj = NULL;
+ INIT_LIST_HEAD(&reg->lru_list);
}
+
+ INIT_LIST_HEAD(&dev_priv->mm.fence_list);
}
void i915_gem_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
+ struct intel_ring_buffer *ring;
int i;
- for (i = 0; i < I915_NUM_RINGS; i++)
- i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]);
+ for_each_ring(ring, dev_priv, i)
+ i915_gem_reset_ring_lists(dev_priv, ring);
/* Remove anything from the flushing lists. The GPU cache is likely
* to be lost on reset along with the data, so simply move the
@@ -1839,24 +1794,11 @@ void
i915_gem_retire_requests(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
int i;
- if (!list_empty(&dev_priv->mm.deferred_free_list)) {
- struct drm_i915_gem_object *obj, *next;
-
- /* We must be careful that during unbind() we do not
- * accidentally infinitely recurse into retire requests.
- * Currently:
- * retire -> free -> unbind -> wait -> retire_ring
- */
- list_for_each_entry_safe(obj, next,
- &dev_priv->mm.deferred_free_list,
- mm_list)
- i915_gem_free_object_tail(obj);
- }
-
- for (i = 0; i < I915_NUM_RINGS; i++)
- i915_gem_retire_requests_ring(&dev_priv->ring[i]);
+ for_each_ring(ring, dev_priv, i)
+ i915_gem_retire_requests_ring(ring);
}
static void
@@ -1864,6 +1806,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
{
drm_i915_private_t *dev_priv;
struct drm_device *dev;
+ struct intel_ring_buffer *ring;
bool idle;
int i;
@@ -1883,9 +1826,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
* objects indefinitely.
*/
idle = true;
- for (i = 0; i < I915_NUM_RINGS; i++) {
- struct intel_ring_buffer *ring = &dev_priv->ring[i];
-
+ for_each_ring(ring, dev_priv, i) {
if (!list_empty(&ring->gpu_write_list)) {
struct drm_i915_gem_request *request;
int ret;
@@ -1907,20 +1848,10 @@ i915_gem_retire_work_handler(struct work_struct *work)
mutex_unlock(&dev->struct_mutex);
}
-/**
- * Waits for a sequence number to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- */
-int
-i915_wait_request(struct intel_ring_buffer *ring,
- uint32_t seqno,
- bool do_retire)
+static int
+i915_gem_check_wedge(struct drm_i915_private *dev_priv)
{
- drm_i915_private_t *dev_priv = ring->dev->dev_private;
- u32 ier;
- int ret = 0;
-
- BUG_ON(seqno == 0);
+ BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
if (atomic_read(&dev_priv->mm.wedged)) {
struct completion *x = &dev_priv->error_completion;
@@ -1935,6 +1866,20 @@ i915_wait_request(struct intel_ring_buffer *ring,
return recovery_complete ? -EIO : -EAGAIN;
}
+ return 0;
+}
+
+/*
+ * Compare seqno against outstanding lazy request. Emit a request if they are
+ * equal.
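+ * ("Outstanding lazy request" means commands already emitted to the ring
+ * for which no request has been queued yet; queuing one here makes the
+ * seqno waitable and retireable.)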
+ */
+static int
+i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
+{
+ int ret = 0;
+
+ BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+
if (seqno == ring->outstanding_lazy_request) {
struct drm_i915_gem_request *request;
@@ -1948,54 +1893,67 @@ i915_wait_request(struct intel_ring_buffer *ring,
return ret;
}
- seqno = request->seqno;
+ BUG_ON(seqno != request->seqno);
}
- if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
- if (HAS_PCH_SPLIT(ring->dev))
- ier = I915_READ(DEIER) | I915_READ(GTIER);
- else
- ier = I915_READ(IER);
- if (!ier) {
- DRM_ERROR("something (likely vbetool) disabled "
- "interrupts, re-enabling\n");
- ring->dev->driver->irq_preinstall(ring->dev);
- ring->dev->driver->irq_postinstall(ring->dev);
- }
+ return ret;
+}
- trace_i915_gem_request_wait_begin(ring, seqno);
-
- ring->waiting_seqno = seqno;
- if (ring->irq_get(ring)) {
- if (dev_priv->mm.interruptible)
- ret = wait_event_interruptible(ring->irq_queue,
- i915_seqno_passed(ring->get_seqno(ring), seqno)
- || atomic_read(&dev_priv->mm.wedged));
- else
- wait_event(ring->irq_queue,
- i915_seqno_passed(ring->get_seqno(ring), seqno)
- || atomic_read(&dev_priv->mm.wedged));
-
- ring->irq_put(ring);
- } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
- seqno) ||
- atomic_read(&dev_priv->mm.wedged), 3000))
- ret = -EBUSY;
- ring->waiting_seqno = 0;
-
- trace_i915_gem_request_wait_end(ring, seqno);
- }
+static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
+ bool interruptible)
+{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ int ret = 0;
+
+ if (i915_seqno_passed(ring->get_seqno(ring), seqno))
+ return 0;
+
+ trace_i915_gem_request_wait_begin(ring, seqno);
+ if (WARN_ON(!ring->irq_get(ring)))
+ return -ENODEV;
+
+#define EXIT_COND \
+ (i915_seqno_passed(ring->get_seqno(ring), seqno) || \
+ atomic_read(&dev_priv->mm.wedged))
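+/* A macro so the identical condition feeds both wait_event() variants
+ * below. Note that wait_event_interruptible() can return -ERESTARTSYS,
+ * which is handed straight back to the caller. */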
+
+ if (interruptible)
+ ret = wait_event_interruptible(ring->irq_queue,
+ EXIT_COND);
+ else
+ wait_event(ring->irq_queue, EXIT_COND);
+
+ ring->irq_put(ring);
+ trace_i915_gem_request_wait_end(ring, seqno);
+#undef EXIT_COND
+
+ return ret;
+}
+
+/**
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
+int
+i915_wait_request(struct intel_ring_buffer *ring,
+ uint32_t seqno)
+{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ int ret = 0;
+
+ BUG_ON(seqno == 0);
+
+ ret = i915_gem_check_wedge(dev_priv);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_check_olr(ring, seqno);
+ if (ret)
+ return ret;
+
+ ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible);
if (atomic_read(&dev_priv->mm.wedged))
ret = -EAGAIN;
- /* Directly dispatch request retiring. While we have the work queue
- * to handle this, the waiter on a request often wants an associated
- * buffer to have made it to the inactive list, and we would need
- * a separate wait queue to handle that.
- */
- if (ret == 0 && do_retire)
- i915_gem_retire_requests_ring(ring);
-
return ret;
}
@@ -2017,15 +1975,58 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
* it.
*/
if (obj->active) {
- ret = i915_wait_request(obj->ring, obj->last_rendering_seqno,
- true);
+ ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
if (ret)
return ret;
+ i915_gem_retire_requests_ring(obj->ring);
}
return 0;
}
+/**
+ * i915_gem_object_sync - sync an object to a ring.
+ *
+ * @obj: object which may be in use on another ring.
+ * @to: ring we wish to use the object on. May be NULL.
+ *
+ * This code is meant to abstract object synchronization with the GPU.
+ * Calling with NULL implies synchronizing the object with the CPU
+ * rather than a particular GPU ring.
+ *
+ * Returns 0 if successful, else propagates the lower layer's error up.
+ */
+int
+i915_gem_object_sync(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *to)
+{
+ struct intel_ring_buffer *from = obj->ring;
+ u32 seqno;
+ int ret, idx;
+
+ if (from == NULL || to == from)
+ return 0;
+
+ if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
+ return i915_gem_object_wait_rendering(obj);
+
+ idx = intel_ring_sync_index(from, to);
+
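+ /* from->sync_seqno[idx] caches the last seqno of 'from' that 'to' has
+ * already waited upon via a semaphore; anything at or below it is
+ * guaranteed to be ordered already and needs no further wait. */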
+ seqno = obj->last_rendering_seqno;
+ if (seqno <= from->sync_seqno[idx])
+ return 0;
+
+ ret = i915_gem_check_olr(obj->ring, seqno);
+ if (ret)
+ return ret;
+
+ ret = to->sync_to(to, from, seqno);
+ if (!ret)
+ from->sync_seqno[idx] = seqno;
+
+ return ret;
+}
+
static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
{
u32 old_write_domain, old_read_domains;
@@ -2068,7 +2069,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
}
ret = i915_gem_object_finish_gpu(obj);
- if (ret == -ERESTARTSYS)
+ if (ret)
return ret;
/* Continue on if we fail due to EIO, the GPU is hung so we
* should be safe and we need to cleanup or else we might
@@ -2095,16 +2096,18 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
/* release the fence reg _after_ flushing */
ret = i915_gem_object_put_fence(obj);
- if (ret == -ERESTARTSYS)
+ if (ret)
return ret;
trace_i915_gem_object_unbind(obj);
- i915_gem_gtt_unbind_object(obj);
+ if (obj->has_global_gtt_mapping)
+ i915_gem_gtt_unbind_object(obj);
if (obj->has_aliasing_ppgtt_mapping) {
i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
obj->has_aliasing_ppgtt_mapping = 0;
}
+ i915_gem_gtt_finish_object(obj);
i915_gem_object_put_pages_gtt(obj);
@@ -2145,7 +2148,7 @@ i915_gem_flush_ring(struct intel_ring_buffer *ring,
return 0;
}
-static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
+static int i915_ring_idle(struct intel_ring_buffer *ring)
{
int ret;
@@ -2159,208 +2162,201 @@ static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
return ret;
}
- return i915_wait_request(ring, i915_gem_next_request_seqno(ring),
- do_retire);
+ return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
}
-int i915_gpu_idle(struct drm_device *dev, bool do_retire)
+int i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
int ret, i;
/* Flush everything onto the inactive list. */
- for (i = 0; i < I915_NUM_RINGS; i++) {
- ret = i915_ring_idle(&dev_priv->ring[i], do_retire);
+ for_each_ring(ring, dev_priv, i) {
+ ret = i915_ring_idle(ring);
if (ret)
return ret;
+
+ /* Is the device fubar? */
+ if (WARN_ON(!list_empty(&ring->gpu_write_list)))
+ return -EBUSY;
}
return 0;
}
-static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined)
+static void sandybridge_write_fence_reg(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- u32 size = obj->gtt_space->size;
- int regnum = obj->fence_reg;
uint64_t val;
- val = (uint64_t)((obj->gtt_offset + size - 4096) &
- 0xfffff000) << 32;
- val |= obj->gtt_offset & 0xfffff000;
- val |= (uint64_t)((obj->stride / 128) - 1) <<
- SANDYBRIDGE_FENCE_PITCH_SHIFT;
+ if (obj) {
+ u32 size = obj->gtt_space->size;
- if (obj->tiling_mode == I915_TILING_Y)
- val |= 1 << I965_FENCE_TILING_Y_SHIFT;
- val |= I965_FENCE_REG_VALID;
+ val = (uint64_t)((obj->gtt_offset + size - 4096) &
+ 0xfffff000) << 32;
+ val |= obj->gtt_offset & 0xfffff000;
+ val |= (uint64_t)((obj->stride / 128) - 1) <<
+ SANDYBRIDGE_FENCE_PITCH_SHIFT;
- if (pipelined) {
- int ret = intel_ring_begin(pipelined, 6);
- if (ret)
- return ret;
-
- intel_ring_emit(pipelined, MI_NOOP);
- intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
- intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
- intel_ring_emit(pipelined, (u32)val);
- intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
- intel_ring_emit(pipelined, (u32)(val >> 32));
- intel_ring_advance(pipelined);
+ if (obj->tiling_mode == I915_TILING_Y)
+ val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+ val |= I965_FENCE_REG_VALID;
} else
- I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);
+ val = 0;
- return 0;
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val);
+ POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8);
}
-static int i965_write_fence_reg(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined)
+static void i965_write_fence_reg(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- u32 size = obj->gtt_space->size;
- int regnum = obj->fence_reg;
uint64_t val;
- val = (uint64_t)((obj->gtt_offset + size - 4096) &
- 0xfffff000) << 32;
- val |= obj->gtt_offset & 0xfffff000;
- val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
- if (obj->tiling_mode == I915_TILING_Y)
- val |= 1 << I965_FENCE_TILING_Y_SHIFT;
- val |= I965_FENCE_REG_VALID;
+ if (obj) {
+ u32 size = obj->gtt_space->size;
- if (pipelined) {
- int ret = intel_ring_begin(pipelined, 6);
- if (ret)
- return ret;
-
- intel_ring_emit(pipelined, MI_NOOP);
- intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
- intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
- intel_ring_emit(pipelined, (u32)val);
- intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
- intel_ring_emit(pipelined, (u32)(val >> 32));
- intel_ring_advance(pipelined);
+ val = (uint64_t)((obj->gtt_offset + size - 4096) &
+ 0xfffff000) << 32;
+ val |= obj->gtt_offset & 0xfffff000;
+ val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
+ if (obj->tiling_mode == I915_TILING_Y)
+ val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+ val |= I965_FENCE_REG_VALID;
} else
- I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);
+ val = 0;
- return 0;
+ I915_WRITE64(FENCE_REG_965_0 + reg * 8, val);
+ POSTING_READ(FENCE_REG_965_0 + reg * 8);
}
-static int i915_write_fence_reg(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined)
+static void i915_write_fence_reg(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- u32 size = obj->gtt_space->size;
- u32 fence_reg, val, pitch_val;
- int tile_width;
-
- if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
- (size & -size) != size ||
- (obj->gtt_offset & (size - 1)),
- "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
- obj->gtt_offset, obj->map_and_fenceable, size))
- return -EINVAL;
+ u32 val;
- if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
- tile_width = 128;
- else
- tile_width = 512;
-
- /* Note: pitch better be a power of two tile widths */
- pitch_val = obj->stride / tile_width;
- pitch_val = ffs(pitch_val) - 1;
-
- val = obj->gtt_offset;
- if (obj->tiling_mode == I915_TILING_Y)
- val |= 1 << I830_FENCE_TILING_Y_SHIFT;
- val |= I915_FENCE_SIZE_BITS(size);
- val |= pitch_val << I830_FENCE_PITCH_SHIFT;
- val |= I830_FENCE_REG_VALID;
-
- fence_reg = obj->fence_reg;
- if (fence_reg < 8)
- fence_reg = FENCE_REG_830_0 + fence_reg * 4;
- else
- fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
+ if (obj) {
+ u32 size = obj->gtt_space->size;
+ int pitch_val;
+ int tile_width;
- if (pipelined) {
- int ret = intel_ring_begin(pipelined, 4);
- if (ret)
- return ret;
+ WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
+ (size & -size) != size ||
+ (obj->gtt_offset & (size - 1)),
+ "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
+ obj->gtt_offset, obj->map_and_fenceable, size);
- intel_ring_emit(pipelined, MI_NOOP);
- intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit(pipelined, fence_reg);
- intel_ring_emit(pipelined, val);
- intel_ring_advance(pipelined);
+ if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
+ tile_width = 128;
+ else
+ tile_width = 512;
+
+ /* Note: pitch better be a power of two tile widths */
+ pitch_val = obj->stride / tile_width;
+ pitch_val = ffs(pitch_val) - 1;
+
+ val = obj->gtt_offset;
+ if (obj->tiling_mode == I915_TILING_Y)
+ val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+ val |= I915_FENCE_SIZE_BITS(size);
+ val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+ val |= I830_FENCE_REG_VALID;
} else
- I915_WRITE(fence_reg, val);
+ val = 0;
- return 0;
+ if (reg < 8)
+ reg = FENCE_REG_830_0 + reg * 4;
+ else
+ reg = FENCE_REG_945_8 + (reg - 8) * 4;
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
}
-static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined)
+static void i830_write_fence_reg(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- u32 size = obj->gtt_space->size;
- int regnum = obj->fence_reg;
uint32_t val;
- uint32_t pitch_val;
- if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
- (size & -size) != size ||
- (obj->gtt_offset & (size - 1)),
- "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
- obj->gtt_offset, size))
- return -EINVAL;
-
- pitch_val = obj->stride / 128;
- pitch_val = ffs(pitch_val) - 1;
-
- val = obj->gtt_offset;
- if (obj->tiling_mode == I915_TILING_Y)
- val |= 1 << I830_FENCE_TILING_Y_SHIFT;
- val |= I830_FENCE_SIZE_BITS(size);
- val |= pitch_val << I830_FENCE_PITCH_SHIFT;
- val |= I830_FENCE_REG_VALID;
+ if (obj) {
+ u32 size = obj->gtt_space->size;
+ uint32_t pitch_val;
+
+ WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
+ (size & -size) != size ||
+ (obj->gtt_offset & (size - 1)),
+ "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
+ obj->gtt_offset, size);
+
+ pitch_val = obj->stride / 128;
+ pitch_val = ffs(pitch_val) - 1;
+
+ val = obj->gtt_offset;
+ if (obj->tiling_mode == I915_TILING_Y)
+ val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+ val |= I830_FENCE_SIZE_BITS(size);
+ val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+ val |= I830_FENCE_REG_VALID;
+ } else
+ val = 0;
- if (pipelined) {
- int ret = intel_ring_begin(pipelined, 4);
- if (ret)
- return ret;
+ I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
+ POSTING_READ(FENCE_REG_830_0 + reg * 4);
+}
- intel_ring_emit(pipelined, MI_NOOP);
- intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
- intel_ring_emit(pipelined, val);
- intel_ring_advance(pipelined);
- } else
- I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);
+static void i915_gem_write_fence(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
+{
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6: sandybridge_write_fence_reg(dev, reg, obj); break;
+ case 5:
+ case 4: i965_write_fence_reg(dev, reg, obj); break;
+ case 3: i915_write_fence_reg(dev, reg, obj); break;
+ case 2: i830_write_fence_reg(dev, reg, obj); break;
+ default: break;
+ }
+}
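+/* Passing obj == NULL writes a zeroed (invalid) fence register; this is how
+ * reset handling and fence stealing clear out a slot. */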
- return 0;
+static inline int fence_number(struct drm_i915_private *dev_priv,
+ struct drm_i915_fence_reg *fence)
+{
+ return fence - dev_priv->fence_regs;
}
-static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
+static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
+ struct drm_i915_fence_reg *fence,
+ bool enable)
{
- return i915_seqno_passed(ring->get_seqno(ring), seqno);
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ int reg = fence_number(dev_priv, fence);
+
+ i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
+
+ if (enable) {
+ obj->fence_reg = reg;
+ fence->obj = obj;
+ list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
+ } else {
+ obj->fence_reg = I915_FENCE_REG_NONE;
+ fence->obj = NULL;
+ list_del_init(&fence->lru_list);
+ }
}
static int
-i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined)
+i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
{
int ret;
if (obj->fenced_gpu_access) {
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
- ret = i915_gem_flush_ring(obj->last_fenced_ring,
+ ret = i915_gem_flush_ring(obj->ring,
0, obj->base.write_domain);
if (ret)
return ret;
@@ -2369,18 +2365,12 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
obj->fenced_gpu_access = false;
}
- if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
- if (!ring_passed_seqno(obj->last_fenced_ring,
- obj->last_fenced_seqno)) {
- ret = i915_wait_request(obj->last_fenced_ring,
- obj->last_fenced_seqno,
- true);
- if (ret)
- return ret;
- }
+ if (obj->last_fenced_seqno) {
+ ret = i915_wait_request(obj->ring, obj->last_fenced_seqno);
+ if (ret)
+ return ret;
obj->last_fenced_seqno = 0;
- obj->last_fenced_ring = NULL;
}
/* Ensure that all CPU reads are completed before installing a fence
@@ -2395,34 +2385,29 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
int
i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
int ret;
- if (obj->tiling_mode)
- i915_gem_release_mmap(obj);
-
- ret = i915_gem_object_flush_fence(obj, NULL);
+ ret = i915_gem_object_flush_fence(obj);
if (ret)
return ret;
- if (obj->fence_reg != I915_FENCE_REG_NONE) {
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-
- WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count);
- i915_gem_clear_fence_reg(obj->base.dev,
- &dev_priv->fence_regs[obj->fence_reg]);
+ if (obj->fence_reg == I915_FENCE_REG_NONE)
+ return 0;
- obj->fence_reg = I915_FENCE_REG_NONE;
- }
+ i915_gem_object_update_fence(obj,
+ &dev_priv->fence_regs[obj->fence_reg],
+ false);
+ i915_gem_object_fence_lost(obj);
return 0;
}
static struct drm_i915_fence_reg *
-i915_find_fence_reg(struct drm_device *dev,
- struct intel_ring_buffer *pipelined)
+i915_find_fence_reg(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_fence_reg *reg, *first, *avail;
+ struct drm_i915_fence_reg *reg, *avail;
int i;
/* First try to find a free reg */
@@ -2440,204 +2425,77 @@ i915_find_fence_reg(struct drm_device *dev,
return NULL;
/* None available, try to steal one or wait for a user to finish */
- avail = first = NULL;
list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
if (reg->pin_count)
continue;
- if (first == NULL)
- first = reg;
-
- if (!pipelined ||
- !reg->obj->last_fenced_ring ||
- reg->obj->last_fenced_ring == pipelined) {
- avail = reg;
- break;
- }
+ return reg;
}
- if (avail == NULL)
- avail = first;
-
- return avail;
+ return NULL;
}
/**
- * i915_gem_object_get_fence - set up a fence reg for an object
+ * i915_gem_object_get_fence - set up fencing for an object
* @obj: object to map through a fence reg
- * @pipelined: ring on which to queue the change, or NULL for CPU access
- * @interruptible: must we wait uninterruptibly for the register to retire?
*
* When mapping objects through the GTT, userspace wants to be able to write
* to them without having to worry about swizzling if the object is tiled.
- *
* This function walks the fence regs looking for a free one for @obj,
* stealing one if it can't find any.
*
* It then sets up the reg based on the object's properties: address, pitch
* and tiling format.
+ *
+ * For an untiled surface, this removes any existing fence.
*/
int
-i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined)
+i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ bool enable = obj->tiling_mode != I915_TILING_NONE;
struct drm_i915_fence_reg *reg;
int ret;
- /* XXX disable pipelining. There are bugs. Shocking. */
- pipelined = NULL;
+ /* Have we updated the tiling parameters on the object and so
+ * will need to serialise the write to the associated fence register?
+ */
+ if (obj->fence_dirty) {
+ ret = i915_gem_object_flush_fence(obj);
+ if (ret)
+ return ret;
+ }
/* Just update our place in the LRU if our fence is getting reused. */
if (obj->fence_reg != I915_FENCE_REG_NONE) {
reg = &dev_priv->fence_regs[obj->fence_reg];
- list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
-
- if (obj->tiling_changed) {
- ret = i915_gem_object_flush_fence(obj, pipelined);
- if (ret)
- return ret;
-
- if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
- pipelined = NULL;
-
- if (pipelined) {
- reg->setup_seqno =
- i915_gem_next_request_seqno(pipelined);
- obj->last_fenced_seqno = reg->setup_seqno;
- obj->last_fenced_ring = pipelined;
- }
-
- goto update;
+ if (!obj->fence_dirty) {
+ list_move_tail(&reg->lru_list,
+ &dev_priv->mm.fence_list);
+ return 0;
}
+ } else if (enable) {
+ reg = i915_find_fence_reg(dev);
+ if (reg == NULL)
+ return -EDEADLK;
- if (!pipelined) {
- if (reg->setup_seqno) {
- if (!ring_passed_seqno(obj->last_fenced_ring,
- reg->setup_seqno)) {
- ret = i915_wait_request(obj->last_fenced_ring,
- reg->setup_seqno,
- true);
- if (ret)
- return ret;
- }
+ if (reg->obj) {
+ struct drm_i915_gem_object *old = reg->obj;
- reg->setup_seqno = 0;
- }
- } else if (obj->last_fenced_ring &&
- obj->last_fenced_ring != pipelined) {
- ret = i915_gem_object_flush_fence(obj, pipelined);
+ ret = i915_gem_object_flush_fence(old);
if (ret)
return ret;
- }
-
- return 0;
- }
-
- reg = i915_find_fence_reg(dev, pipelined);
- if (reg == NULL)
- return -EDEADLK;
-
- ret = i915_gem_object_flush_fence(obj, pipelined);
- if (ret)
- return ret;
-
- if (reg->obj) {
- struct drm_i915_gem_object *old = reg->obj;
-
- drm_gem_object_reference(&old->base);
-
- if (old->tiling_mode)
- i915_gem_release_mmap(old);
- ret = i915_gem_object_flush_fence(old, pipelined);
- if (ret) {
- drm_gem_object_unreference(&old->base);
- return ret;
+ i915_gem_object_fence_lost(old);
}
+ } else
+ return 0;
- if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
- pipelined = NULL;
-
- old->fence_reg = I915_FENCE_REG_NONE;
- old->last_fenced_ring = pipelined;
- old->last_fenced_seqno =
- pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
-
- drm_gem_object_unreference(&old->base);
- } else if (obj->last_fenced_seqno == 0)
- pipelined = NULL;
-
- reg->obj = obj;
- list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
- obj->fence_reg = reg - dev_priv->fence_regs;
- obj->last_fenced_ring = pipelined;
-
- reg->setup_seqno =
- pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
- obj->last_fenced_seqno = reg->setup_seqno;
-
-update:
- obj->tiling_changed = false;
- switch (INTEL_INFO(dev)->gen) {
- case 7:
- case 6:
- ret = sandybridge_write_fence_reg(obj, pipelined);
- break;
- case 5:
- case 4:
- ret = i965_write_fence_reg(obj, pipelined);
- break;
- case 3:
- ret = i915_write_fence_reg(obj, pipelined);
- break;
- case 2:
- ret = i830_write_fence_reg(obj, pipelined);
- break;
- }
-
- return ret;
-}
-
-/**
- * i915_gem_clear_fence_reg - clear out fence register info
- * @obj: object to clear
- *
- * Zeroes out the fence register itself and clears out the associated
- * data structures in dev_priv and obj.
- */
-static void
-i915_gem_clear_fence_reg(struct drm_device *dev,
- struct drm_i915_fence_reg *reg)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t fence_reg = reg - dev_priv->fence_regs;
-
- switch (INTEL_INFO(dev)->gen) {
- case 7:
- case 6:
- I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
- break;
- case 5:
- case 4:
- I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
- break;
- case 3:
- if (fence_reg >= 8)
- fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
- else
- case 2:
- fence_reg = FENCE_REG_830_0 + fence_reg * 4;
-
- I915_WRITE(fence_reg, 0);
- break;
- }
+ i915_gem_object_update_fence(obj, reg, enable);
+ obj->fence_dirty = false;
- list_del_init(&reg->lru_list);
- reg->obj = NULL;
- reg->setup_seqno = 0;
- reg->pin_count = 0;
+ return 0;
}
/**
@@ -2749,7 +2607,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
return ret;
}
- ret = i915_gem_gtt_bind_object(obj);
+ ret = i915_gem_gtt_prepare_object(obj);
if (ret) {
i915_gem_object_put_pages_gtt(obj);
drm_mm_put_block(obj->gtt_space);
@@ -2761,6 +2619,9 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
goto search_free;
}
+ if (!dev_priv->mm.aliasing_ppgtt)
+ i915_gem_gtt_bind_object(obj, obj->cache_level);
+
list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
@@ -2878,6 +2739,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
+ drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
uint32_t old_write_domain, old_read_domains;
int ret;
@@ -2918,6 +2780,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
old_read_domains,
old_write_domain);
+ /* And bump the LRU for this access */
+ if (i915_gem_object_is_inactive(obj))
+ list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+
return 0;
}
@@ -2953,7 +2819,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
return ret;
}
- i915_gem_gtt_rebind_object(obj, cache_level);
+ if (obj->has_global_gtt_mapping)
+ i915_gem_gtt_bind_object(obj, cache_level);
if (obj->has_aliasing_ppgtt_mapping)
i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
obj, cache_level);
@@ -2990,11 +2857,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
* Prepare buffer for display plane (scanout, cursors, etc).
* Can be called from an uninterruptible phase (modesetting) and allows
* any flushes to be pipelined (for pageflips).
- *
- * For the display plane, we want to be in the GTT but out of any write
- * domains. So in many ways this looks like set_to_gtt_domain() apart from the
- * ability to pipeline the waits, pinning and any additional subtleties
- * that may differentiate the display plane from ordinary buffers.
*/
int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
@@ -3009,8 +2871,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
return ret;
if (pipelined != obj->ring) {
- ret = i915_gem_object_wait_rendering(obj);
- if (ret == -ERESTARTSYS)
+ ret = i915_gem_object_sync(obj, pipelined);
+ if (ret)
return ret;
}
@@ -3082,7 +2944,7 @@ i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
* This function returns when the move is complete, including waiting on
* flushes to occur.
*/
-static int
+int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
uint32_t old_write_domain, old_read_domains;
@@ -3095,17 +2957,14 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
if (ret)
return ret;
- ret = i915_gem_object_wait_rendering(obj);
- if (ret)
- return ret;
+ if (write || obj->pending_gpu_write) {
+ ret = i915_gem_object_wait_rendering(obj);
+ if (ret)
+ return ret;
+ }
i915_gem_object_flush_gtt_write_domain(obj);
- /* If we have a partially-valid cache of the object in the CPU,
- * finish invalidating it and free the per-page flags.
- */
- i915_gem_object_set_to_full_cpu_read_domain(obj);
-
old_write_domain = obj->base.write_domain;
old_read_domains = obj->base.read_domains;
@@ -3136,113 +2995,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
return 0;
}
-/**
- * Moves the object from a partially CPU read to a full one.
- *
- * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
- * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
- */
-static void
-i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
-{
- if (!obj->page_cpu_valid)
- return;
-
- /* If we're partially in the CPU read domain, finish moving it in.
- */
- if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
- int i;
-
- for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
- if (obj->page_cpu_valid[i])
- continue;
- drm_clflush_pages(obj->pages + i, 1);
- }
- }
-
- /* Free the page_cpu_valid mappings which are now stale, whether
- * or not we've got I915_GEM_DOMAIN_CPU.
- */
- kfree(obj->page_cpu_valid);
- obj->page_cpu_valid = NULL;
-}
-
-/**
- * Set the CPU read domain on a range of the object.
- *
- * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
- * not entirely valid. The page_cpu_valid member of the object flags which
- * pages have been flushed, and will be respected by
- * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
- * of the whole object.
- *
- * This function returns when the move is complete, including waiting on
- * flushes to occur.
- */
-static int
-i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
- uint64_t offset, uint64_t size)
-{
- uint32_t old_read_domains;
- int i, ret;
-
- if (offset == 0 && size == obj->base.size)
- return i915_gem_object_set_to_cpu_domain(obj, 0);
-
- ret = i915_gem_object_flush_gpu_write_domain(obj);
- if (ret)
- return ret;
-
- ret = i915_gem_object_wait_rendering(obj);
- if (ret)
- return ret;
-
- i915_gem_object_flush_gtt_write_domain(obj);
-
- /* If we're already fully in the CPU read domain, we're done. */
- if (obj->page_cpu_valid == NULL &&
- (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
- return 0;
-
- /* Otherwise, create/clear the per-page CPU read domain flag if we're
- * newly adding I915_GEM_DOMAIN_CPU
- */
- if (obj->page_cpu_valid == NULL) {
- obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE,
- GFP_KERNEL);
- if (obj->page_cpu_valid == NULL)
- return -ENOMEM;
- } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
- memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);
-
- /* Flush the cache on any pages that are still invalid from the CPU's
- * perspective.
- */
- for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
- i++) {
- if (obj->page_cpu_valid[i])
- continue;
-
- drm_clflush_pages(obj->pages + i, 1);
-
- obj->page_cpu_valid[i] = 1;
- }
-
- /* It should now be out of any other write domains, and we can update
- * the domain values for our changes.
- */
- BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
-
- old_read_domains = obj->base.read_domains;
- obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
-
- trace_i915_gem_object_change_domain(obj,
- old_read_domains,
- obj->base.write_domain);
-
- return 0;
-}
-
/* Throttle our rendering by waiting until the ring has completed our requests
* emitted over 20 msec ago.
*
@@ -3280,28 +3032,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (seqno == 0)
return 0;
- ret = 0;
- if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
- /* And wait for the seqno passing without holding any locks and
- * causing extra latency for others. This is safe as the irq
- * generation is designed to be run atomically and so is
- * lockless.
- */
- if (ring->irq_get(ring)) {
- ret = wait_event_interruptible(ring->irq_queue,
- i915_seqno_passed(ring->get_seqno(ring), seqno)
- || atomic_read(&dev_priv->mm.wedged));
- ring->irq_put(ring);
-
- if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
- ret = -EIO;
- } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
- seqno) ||
- atomic_read(&dev_priv->mm.wedged), 3000)) {
- ret = -EBUSY;
- }
- }
-
+ ret = __wait_seqno(ring, seqno, true);
if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
@@ -3313,12 +3044,9 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
bool map_and_fenceable)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
- WARN_ON(i915_verify_lists(dev));
if (obj->gtt_space != NULL) {
if ((alignment && obj->gtt_offset & (alignment - 1)) ||
@@ -3343,34 +3071,23 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
return ret;
}
- if (obj->pin_count++ == 0) {
- if (!obj->active)
- list_move_tail(&obj->mm_list,
- &dev_priv->mm.pinned_list);
- }
+ if (!obj->has_global_gtt_mapping && map_and_fenceable)
+ i915_gem_gtt_bind_object(obj, obj->cache_level);
+
+ obj->pin_count++;
obj->pin_mappable |= map_and_fenceable;
- WARN_ON(i915_verify_lists(dev));
return 0;
}
void
i915_gem_object_unpin(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- WARN_ON(i915_verify_lists(dev));
BUG_ON(obj->pin_count == 0);
BUG_ON(obj->gtt_space == NULL);
- if (--obj->pin_count == 0) {
- if (!obj->active)
- list_move_tail(&obj->mm_list,
- &dev_priv->mm.inactive_list);
+ if (--obj->pin_count == 0)
obj->pin_mappable = false;
- }
- WARN_ON(i915_verify_lists(dev));
}
int
@@ -3494,20 +3211,9 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
ret = i915_gem_flush_ring(obj->ring,
0, obj->base.write_domain);
- } else if (obj->ring->outstanding_lazy_request ==
- obj->last_rendering_seqno) {
- struct drm_i915_gem_request *request;
-
- /* This ring is not being cleared by active usage,
- * so emit a request to do so.
- */
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request) {
- ret = i915_add_request(obj->ring, NULL, request);
- if (ret)
- kfree(request);
- } else
- ret = -ENOMEM;
+ } else {
+ ret = i915_gem_check_olr(obj->ring,
+ obj->last_rendering_seqno);
}
/* Update the active list for the hardware's current position.
@@ -3643,46 +3349,42 @@ int i915_gem_init_object(struct drm_gem_object *obj)
return 0;
}
-static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
+void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
+ struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
-
- ret = i915_gem_object_unbind(obj);
- if (ret == -ERESTARTSYS) {
- list_move(&obj->mm_list,
- &dev_priv->mm.deferred_free_list);
- return;
- }
trace_i915_gem_object_destroy(obj);
+ if (gem_obj->import_attach)
+ drm_prime_gem_destroy(gem_obj, obj->sg_table);
+
+ if (obj->phys_obj)
+ i915_gem_detach_phys_object(dev, obj);
+
+ obj->pin_count = 0;
+ if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
+ bool was_interruptible;
+
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
+
+ WARN_ON(i915_gem_object_unbind(obj));
+
+ dev_priv->mm.interruptible = was_interruptible;
+ }
+
if (obj->base.map_list.map)
drm_gem_free_mmap_offset(&obj->base);
drm_gem_object_release(&obj->base);
i915_gem_info_remove_obj(dev_priv, obj->base.size);
- kfree(obj->page_cpu_valid);
kfree(obj->bit_17);
kfree(obj);
}
-void i915_gem_free_object(struct drm_gem_object *gem_obj)
-{
- struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
- struct drm_device *dev = obj->base.dev;
-
- while (obj->pin_count > 0)
- i915_gem_object_unpin(obj);
-
- if (obj->phys_obj)
- i915_gem_detach_phys_object(dev, obj);
-
- i915_gem_free_object_tail(obj);
-}
-
int
i915_gem_idle(struct drm_device *dev)
{
@@ -3696,20 +3398,16 @@ i915_gem_idle(struct drm_device *dev)
return 0;
}
- ret = i915_gpu_idle(dev, true);
+ ret = i915_gpu_idle(dev);
if (ret) {
mutex_unlock(&dev->struct_mutex);
return ret;
}
+ i915_gem_retire_requests(dev);
/* Under UMS, be paranoid and evict. */
- if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = i915_gem_evict_inactive(dev, false);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
- }
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_gem_evict_everything(dev, false);
i915_gem_reset_fences(dev);
@@ -3747,9 +3445,9 @@ void i915_gem_init_swizzling(struct drm_device *dev)
I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
if (IS_GEN6(dev))
- I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_SNB));
+ I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
else
- I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_IVB));
+ I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
}
void i915_gem_init_ppgtt(struct drm_device *dev)
@@ -3787,21 +3485,27 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
pd_offset <<= 16;
if (INTEL_INFO(dev)->gen == 6) {
- uint32_t ecochk = I915_READ(GAM_ECOCHK);
+ uint32_t ecochk, gab_ctl, ecobits;
+
+ ecobits = I915_READ(GAC_ECO_BITS);
+ I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
+
+ gab_ctl = I915_READ(GAB_CTL);
+ I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
+
+ ecochk = I915_READ(GAM_ECOCHK);
I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
ECOCHK_PPGTT_CACHE64B);
- I915_WRITE(GFX_MODE, GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
+ I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
} else if (INTEL_INFO(dev)->gen >= 7) {
I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
/* GFX_MODE is per-ring on gen7+ */
}
- for (i = 0; i < I915_NUM_RINGS; i++) {
- ring = &dev_priv->ring[i];
-
+ for_each_ring(ring, dev_priv, i) {
if (INTEL_INFO(dev)->gen >= 7)
I915_WRITE(RING_MODE_GEN7(ring),
- GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
+ _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
@@ -3845,14 +3549,80 @@ cleanup_render_ring:
return ret;
}
+static bool
+intel_enable_ppgtt(struct drm_device *dev)
+{
+ if (i915_enable_ppgtt >= 0)
+ return i915_enable_ppgtt;
+
+#ifdef CONFIG_INTEL_IOMMU
+ /* Disable ppgtt on SNB if VT-d is on. */
+ if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+ return false;
+#endif
+
+ return true;
+}
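
i915_enable_ppgtt follows the driver's usual tri-state module-parameter convention, summarized here:

    /* -1  auto: fall through to the VT-d heuristic in the function above
     *  0  force off
     *  1  force on
     * Any value >= 0 short-circuits the SNB/VT-d check. */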
+
+int i915_gem_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long gtt_size, mappable_size;
+ int ret;
+
+ gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
+ mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+
+ mutex_lock(&dev->struct_mutex);
+ if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
+ /* PPGTT pdes are stolen from global gtt ptes, so shrink the
+ * aperture accordingly when using aliasing ppgtt. */
+ gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+
+ i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);
+
+ ret = i915_gem_init_aliasing_ppgtt(dev);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+ } else {
+ /* Let GEM manage all of the aperture.
+ *
+ * However, leave one page at the end still bound to the scratch
+ * page. There are a number of places where the hardware
+ * apparently prefetches past the end of the object, and we've
+ * seen multiple hangs with the GPU head pointer stuck in a
+ * batchbuffer bound at the last page of the aperture. One page
+ * should be enough to keep any prefetching inside of the
+ * aperture.
+ */
+ i915_gem_init_global_gtt(dev, 0, mappable_size,
+ gtt_size);
+ }
+
+ ret = i915_gem_init_hw(dev);
+ mutex_unlock(&dev->struct_mutex);
+ if (ret) {
+ i915_gem_cleanup_aliasing_ppgtt(dev);
+ return ret;
+ }
+
+ /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ dev_priv->dri1.allow_batchbuffer = 1;
+ return 0;
+}
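
For scale, assuming I915_PPGTT_PD_ENTRIES is 512 (the gen6 aliasing-PPGTT directory size), the reservation above works out to:

    /* The page-directory pages live in GTT address space: */
    gtt_size -= 512 * 4096;   /* 2MiB carved off the top of the GTT */
    /* i.e. a 2GiB GTT is handed to GEM as 2GiB - 2MiB with aliasing PPGTT */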
+
void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
int i;
- for (i = 0; i < I915_NUM_RINGS; i++)
- intel_cleanup_ring_buffer(&dev_priv->ring[i]);
+ for_each_ring(ring, dev_priv, i)
+ intel_cleanup_ring_buffer(ring);
}
int
@@ -3860,7 +3630,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret, i;
+ int ret;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
@@ -3882,10 +3652,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
BUG_ON(!list_empty(&dev_priv->mm.active_list));
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
- for (i = 0; i < I915_NUM_RINGS; i++) {
- BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
- BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
- }
mutex_unlock(&dev->struct_mutex);
ret = drm_irq_install(dev);
@@ -3944,9 +3710,7 @@ i915_gem_load(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->mm.active_list);
INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
- INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
- INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
for (i = 0; i < I915_NUM_RINGS; i++)
init_ring_lists(&dev_priv->ring[i]);
@@ -3958,12 +3722,8 @@ i915_gem_load(struct drm_device *dev)
/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
if (IS_GEN3(dev)) {
- u32 tmp = I915_READ(MI_ARB_STATE);
- if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
- /* arb state is a masked write, so set bit + bit in mask */
- tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
- I915_WRITE(MI_ARB_STATE, tmp);
- }
+ I915_WRITE(MI_ARB_STATE,
+ _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
}
dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
@@ -3978,9 +3738,7 @@ i915_gem_load(struct drm_device *dev)
dev_priv->num_fence_regs = 8;
/* Initialize fence registers to zero */
- for (i = 0; i < dev_priv->num_fence_regs; i++) {
- i915_gem_clear_fence_reg(dev, &dev_priv->fence_regs[i]);
- }
+ i915_gem_reset_fences(dev);
i915_gem_detect_bit_6_swizzle(dev);
init_waitqueue_head(&dev_priv->pending_flip_queue);
@@ -4268,7 +4026,7 @@ rescan:
* This has a dramatic impact to reduce the number of
* OOM-killer events whilst running the GPU aggressively.
*/
- if (i915_gpu_idle(dev, true) == 0)
+ if (i915_gpu_idle(dev) == 0)
goto rescan;
}
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index cc93cac242d..a4f6aaabca9 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -114,22 +114,6 @@ i915_verify_lists(struct drm_device *dev)
}
}
- list_for_each_entry(obj, &dev_priv->mm.pinned_list, list) {
- if (obj->base.dev != dev ||
- !atomic_read(&obj->base.refcount.refcount)) {
- DRM_ERROR("freed pinned %p\n", obj);
- err++;
- break;
- } else if (!obj->pin_count || obj->active ||
- (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
- DRM_ERROR("invalid pinned %p (p %d a %d w %x)\n",
- obj,
- obj->pin_count, obj->active,
- obj->base.write_domain);
- err++;
- }
- }
-
return warned = err;
}
#endif /* WATCH_INACTIVE */
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
new file mode 100644
index 00000000000..8e269178d6a
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2012 Red Hat Inc
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Dave Airlie <airlied@redhat.com>
+ */
+#include "drmP.h"
+#include "i915_drv.h"
+#include <linux/dma-buf.h>
+
+static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction dir)
+{
+ struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
+ struct drm_device *dev = obj->base.dev;
+ int npages = obj->base.size / PAGE_SIZE;
+ struct sg_table *sg = NULL;
+ int ret;
+ int nents;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (!obj->pages) {
+ ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
+ if (ret)
+ goto out;
+ }
+
+ /* link the pages into an SG then map the sg */
+ sg = drm_prime_pages_to_sg(obj->pages, npages);
+ nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
+out:
+ mutex_unlock(&dev->struct_mutex);
+ return sg;
+}
+
+static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *sg, enum dma_data_direction dir)
+{
+ dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
+ sg_free_table(sg);
+ kfree(sg);
+}
+
+static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
+{
+ struct drm_i915_gem_object *obj = dma_buf->priv;
+
+ if (obj->base.export_dma_buf == dma_buf) {
+ /* drop the reference on the export fd holds */
+ obj->base.export_dma_buf = NULL;
+ drm_gem_object_unreference_unlocked(&obj->base);
+ }
+}
+
+static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
+{
+ return NULL;
+}
+
+static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+
+}
+
+static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
+{
+ return NULL;
+}
+
+static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+
+}
+
+static const struct dma_buf_ops i915_dmabuf_ops = {
+ .map_dma_buf = i915_gem_map_dma_buf,
+ .unmap_dma_buf = i915_gem_unmap_dma_buf,
+ .release = i915_gem_dmabuf_release,
+ .kmap = i915_gem_dmabuf_kmap,
+ .kmap_atomic = i915_gem_dmabuf_kmap_atomic,
+ .kunmap = i915_gem_dmabuf_kunmap,
+ .kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
+};
+
+struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *gem_obj, int flags)
+{
+ struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+
+ return dma_buf_export(obj, &i915_dmabuf_ops,
+ obj->base.size, 0600);
+}
+
+struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct sg_table *sg;
+ struct drm_i915_gem_object *obj;
+ int npages;
+ int size;
+ int ret;
+
+ /* is this one of our own objects? */
+ if (dma_buf->ops == &i915_dmabuf_ops) {
+ obj = dma_buf->priv;
+ /* is it from our device? */
+ if (obj->base.dev == dev) {
+ drm_gem_object_reference(&obj->base);
+ return &obj->base;
+ }
+ }
+
+ /* need to attach */
+ attach = dma_buf_attach(dma_buf, dev->dev);
+ if (IS_ERR(attach))
+ return ERR_CAST(attach);
+
+ sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sg)) {
+ ret = PTR_ERR(sg);
+ goto fail_detach;
+ }
+
+ size = dma_buf->size;
+ npages = size / PAGE_SIZE;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (obj == NULL) {
+ ret = -ENOMEM;
+ goto fail_unmap;
+ }
+
+ ret = drm_gem_private_object_init(dev, &obj->base, size);
+ if (ret) {
+ kfree(obj);
+ goto fail_unmap;
+ }
+
+ obj->sg_table = sg;
+ obj->base.import_attach = attach;
+
+ return &obj->base;
+
+fail_unmap:
+ dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+ return ERR_PTR(ret);
+}
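
A hedged userspace sketch of the round trip these hooks serve: export a GEM handle as a dma-buf fd on one device, then import it on another. Error handling is elided and "handle" is assumed to be a valid GEM handle.

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/drm.h>

    static int gem_handle_to_dmabuf_fd(int drm_fd, uint32_t handle)
    {
            struct drm_prime_handle args = {
                    .handle = handle,
                    .flags = DRM_CLOEXEC,
            };

            if (ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args) < 0)
                    return -1;
            return args.fd; /* pass to DRM_IOCTL_PRIME_FD_TO_HANDLE to import */
    }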
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 21a82710f4b..ae7c24e12e5 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -35,6 +35,9 @@
static bool
mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
{
+ if (obj->pin_count)
+ return false;
+
list_add(&obj->exec_list, unwind);
return drm_mm_scan_add_block(obj->gtt_space);
}
@@ -90,7 +93,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
/* Now merge in the soon-to-be-expired objects... */
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
/* Does the object require an outstanding flush? */
- if (obj->base.write_domain || obj->pin_count)
+ if (obj->base.write_domain)
continue;
if (mark_free(obj, &unwind_list))
@@ -99,14 +102,11 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
/* Finally add anything with a pending flush (in order of retirement) */
list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
- if (obj->pin_count)
- continue;
-
if (mark_free(obj, &unwind_list))
goto found;
}
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
- if (!obj->base.write_domain || obj->pin_count)
+ if (!obj->base.write_domain)
continue;
if (mark_free(obj, &unwind_list))
@@ -166,8 +166,9 @@ int
i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
+ struct drm_i915_gem_object *obj, *next;
bool lists_empty;
+ int ret;
lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
list_empty(&dev_priv->mm.flushing_list) &&
@@ -177,29 +178,24 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
trace_i915_gem_evict_everything(dev, purgeable_only);
- /* Flush everything (on to the inactive lists) and evict */
- ret = i915_gpu_idle(dev, true);
+ /* gpu_idle flushes everything in the write domains onto the
+ * active list; retiring requests then moves it all off the
+ * active list.
+ */
+ ret = i915_gpu_idle(dev);
if (ret)
return ret;
- BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+ i915_gem_retire_requests(dev);
- return i915_gem_evict_inactive(dev, purgeable_only);
-}
-
-/** Unbinds all inactive objects. */
-int
-i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj, *next;
+ BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+ /* Having flushed everything, unbind() should never raise an error */
list_for_each_entry_safe(obj, next,
&dev_priv->mm.inactive_list, mm_list) {
if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
- int ret = i915_gem_object_unbind(obj);
- if (ret)
- return ret;
+ if (obj->pin_count == 0)
+ WARN_ON(i915_gem_object_unbind(obj));
}
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index de431942ded..974a9f1068a 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -266,6 +266,12 @@ eb_destroy(struct eb_objects *eb)
kfree(eb);
}
+static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
+{
+ return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
+ obj->cache_level != I915_CACHE_NONE);
+}
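
Why the helper prefers CPU relocations, in brief:

    /* Rationale for use_cpu_reloc(): if the object is already in the CPU
     * write domain, or its pages are cacheable, a kmap_atomic() write is
     * coherent and cheaper than mapping the page through the GTT aperture. */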
+
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct eb_objects *eb,
@@ -273,6 +279,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
struct drm_gem_object *target_obj;
+ struct drm_i915_gem_object *target_i915_obj;
uint32_t target_offset;
int ret = -EINVAL;
@@ -281,7 +288,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
if (unlikely(target_obj == NULL))
return -ENOENT;
- target_offset = to_intel_bo(target_obj)->gtt_offset;
+ target_i915_obj = to_intel_bo(target_obj);
+ target_offset = target_i915_obj->gtt_offset;
/* The target buffer should have appeared before us in the
* exec_object list, so it should have a GTT space bound by now.
@@ -352,11 +360,19 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
return ret;
}
+ /* We can't wait for rendering with pagefaults disabled */
+ if (obj->active && in_atomic())
+ return -EFAULT;
+
reloc->delta += target_offset;
- if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
+ if (use_cpu_reloc(obj)) {
uint32_t page_offset = reloc->offset & ~PAGE_MASK;
char *vaddr;
+ ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+ if (ret)
+ return ret;
+
vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
*(uint32_t *)(vaddr + page_offset) = reloc->delta;
kunmap_atomic(vaddr);
@@ -365,11 +381,11 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
uint32_t __iomem *reloc_entry;
void __iomem *reloc_page;
- /* We can't wait for rendering with pagefaults disabled */
- if (obj->active && in_atomic())
- return -EFAULT;
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ return ret;
- ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+ ret = i915_gem_object_put_fence(obj);
if (ret)
return ret;
@@ -383,6 +399,16 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
io_mapping_unmap_atomic(reloc_page);
}
+ /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
+ * pipe_control writes because the gpu doesn't properly redirect them
+ * through the ppgtt for non-secure batchbuffers. */
+ if (unlikely(IS_GEN6(dev) &&
+ reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
+ !target_i915_obj->has_global_gtt_mapping)) {
+ i915_gem_gtt_bind_object(target_i915_obj,
+ target_i915_obj->cache_level);
+ }
+
/* and update the user's relocation entry */
reloc->presumed_offset = target_offset;
@@ -393,30 +419,46 @@ static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
struct eb_objects *eb)
{
+#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
+ struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
struct drm_i915_gem_relocation_entry __user *user_relocs;
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
- int i, ret;
+ int remain, ret;
user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
- for (i = 0; i < entry->relocation_count; i++) {
- struct drm_i915_gem_relocation_entry reloc;
- if (__copy_from_user_inatomic(&reloc,
- user_relocs+i,
- sizeof(reloc)))
+ remain = entry->relocation_count;
+ while (remain) {
+ struct drm_i915_gem_relocation_entry *r = stack_reloc;
+ int count = remain;
+ if (count > ARRAY_SIZE(stack_reloc))
+ count = ARRAY_SIZE(stack_reloc);
+ remain -= count;
+
+ if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
return -EFAULT;
- ret = i915_gem_execbuffer_relocate_entry(obj, eb, &reloc);
- if (ret)
- return ret;
+ do {
+ u64 offset = r->presumed_offset;
- if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
- &reloc.presumed_offset,
- sizeof(reloc.presumed_offset)))
- return -EFAULT;
+ ret = i915_gem_execbuffer_relocate_entry(obj, eb, r);
+ if (ret)
+ return ret;
+
+ if (r->presumed_offset != offset &&
+ __copy_to_user_inatomic(&user_relocs->presumed_offset,
+ &r->presumed_offset,
+ sizeof(r->presumed_offset))) {
+ return -EFAULT;
+ }
+
+ user_relocs++;
+ r++;
+ } while (--count);
}
return 0;
+#undef N_RELOC
}
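
The chunked copy bounds stack use while pagefaults are disabled: struct drm_i915_gem_relocation_entry is 32 bytes (four u32s plus two u64s), so the arithmetic comes out as:

    /* N_RELOC(512) == 512 / 32 == 16 relocation entries per chunk: a
     * 512-byte on-stack buffer, refilled by __copy_from_user_inatomic()
     * until entry->relocation_count is exhausted. */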
static int
@@ -465,6 +507,13 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
#define __EXEC_OBJECT_HAS_FENCE (1<<31)
static int
+need_reloc_mappable(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+ return entry->relocation_count && !use_cpu_reloc(obj);
+}
+
+static int
pin_and_fence_object(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring)
{
@@ -477,8 +526,7 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
- need_mappable =
- entry->relocation_count ? true : need_fence;
+ need_mappable = need_fence || need_reloc_mappable(obj);
ret = i915_gem_object_pin(obj, entry->alignment, need_mappable);
if (ret)
@@ -486,18 +534,13 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
if (has_fenced_gpu_access) {
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
- if (obj->tiling_mode) {
- ret = i915_gem_object_get_fence(obj, ring);
- if (ret)
- goto err_unpin;
+ ret = i915_gem_object_get_fence(obj);
+ if (ret)
+ goto err_unpin;
+ if (i915_gem_object_pin_fence(obj))
entry->flags |= __EXEC_OBJECT_HAS_FENCE;
- i915_gem_object_pin_fence(obj);
- } else {
- ret = i915_gem_object_put_fence(obj);
- if (ret)
- goto err_unpin;
- }
+
obj->pending_fenced_gpu_access = true;
}
}
@@ -535,8 +578,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
- need_mappable =
- entry->relocation_count ? true : need_fence;
+ need_mappable = need_fence || need_reloc_mappable(obj);
if (need_mappable)
list_move(&obj->exec_list, &ordered_objects);
@@ -576,8 +618,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
- need_mappable =
- entry->relocation_count ? true : need_fence;
+ need_mappable = need_fence || need_reloc_mappable(obj);
if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
(need_mappable && !obj->map_and_fenceable))
@@ -798,64 +839,6 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
return 0;
}
-static bool
-intel_enable_semaphores(struct drm_device *dev)
-{
- if (INTEL_INFO(dev)->gen < 6)
- return 0;
-
- if (i915_semaphores >= 0)
- return i915_semaphores;
-
- /* Disable semaphores on SNB */
- if (INTEL_INFO(dev)->gen == 6)
- return 0;
-
- return 1;
-}
-
-static int
-i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *to)
-{
- struct intel_ring_buffer *from = obj->ring;
- u32 seqno;
- int ret, idx;
-
- if (from == NULL || to == from)
- return 0;
-
- /* XXX gpu semaphores are implicated in various hard hangs on SNB */
- if (!intel_enable_semaphores(obj->base.dev))
- return i915_gem_object_wait_rendering(obj);
-
- idx = intel_ring_sync_index(from, to);
-
- seqno = obj->last_rendering_seqno;
- if (seqno <= from->sync_seqno[idx])
- return 0;
-
- if (seqno == from->outstanding_lazy_request) {
- struct drm_i915_gem_request *request;
-
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL)
- return -ENOMEM;
-
- ret = i915_add_request(from, NULL, request);
- if (ret) {
- kfree(request);
- return ret;
- }
-
- seqno = request->seqno;
- }
-
- from->sync_seqno[idx] = seqno;
-
- return to->sync_to(to, from, seqno - 1);
-}
-
static int
i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
{
@@ -917,7 +900,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
}
list_for_each_entry(obj, objects, exec_list) {
- ret = i915_gem_execbuffer_sync_rings(obj, ring);
+ ret = i915_gem_object_sync(obj, ring);
if (ret)
return ret;
}
@@ -955,7 +938,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
if (!access_ok(VERIFY_WRITE, ptr, length))
return -EFAULT;
- if (fault_in_pages_readable(ptr, length))
+ if (fault_in_multipages_readable(ptr, length))
return -EFAULT;
}
@@ -984,11 +967,14 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
obj->pending_gpu_write = true;
list_move_tail(&obj->gpu_write_list,
&ring->gpu_write_list);
- intel_mark_busy(ring->dev, obj);
+ if (obj->pin_count) /* check for potential scanout */
+ intel_mark_busy(ring->dev, obj);
}
trace_i915_gem_object_change_domain(obj, old_read, old_write);
}
+
+ intel_mark_busy(ring->dev, NULL);
}
static void
@@ -1078,17 +1064,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
ring = &dev_priv->ring[RCS];
break;
case I915_EXEC_BSD:
- if (!HAS_BSD(dev)) {
- DRM_DEBUG("execbuf with invalid ring (BSD)\n");
- return -EINVAL;
- }
ring = &dev_priv->ring[VCS];
break;
case I915_EXEC_BLT:
- if (!HAS_BLT(dev)) {
- DRM_DEBUG("execbuf with invalid ring (BLT)\n");
- return -EINVAL;
- }
ring = &dev_priv->ring[BCS];
break;
default:
@@ -1096,6 +1074,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
(int)(args->flags & I915_EXEC_RING_MASK));
return -EINVAL;
}
+ if (!intel_ring_initialized(ring)) {
+ DRM_DEBUG("execbuf with invalid ring: %d\n",
+ (int)(args->flags & I915_EXEC_RING_MASK));
+ return -EINVAL;
+ }
mode = args->flags & I915_EXEC_CONSTANTS_MASK;
mask = I915_EXEC_CONSTANTS_MASK;
@@ -1133,11 +1116,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
return -EINVAL;
}
+ if (INTEL_INFO(dev)->gen >= 5) {
+ DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
+ return -EINVAL;
+ }
+
if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
DRM_DEBUG("execbuf with %u cliprects\n",
args->num_cliprects);
return -EINVAL;
}
+
cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
GFP_KERNEL);
if (cliprects == NULL) {
@@ -1242,9 +1231,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
* so every billion or so execbuffers, we need to stall
* the GPU in order to reset the counters.
*/
- ret = i915_gpu_idle(dev, true);
+ ret = i915_gpu_idle(dev);
if (ret)
goto err;
+ i915_gem_retire_requests(dev);
BUG_ON(ring->sync_seqno[i]);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index a135c61f411..9fd25a43553 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -96,11 +96,10 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
GFP_KERNEL);
if (!ppgtt->pt_dma_addr)
goto err_pt_alloc;
- }
- for (i = 0; i < ppgtt->num_pd_entries; i++) {
- dma_addr_t pt_addr;
- if (dev_priv->mm.gtt->needs_dmar) {
+ for (i = 0; i < ppgtt->num_pd_entries; i++) {
+ dma_addr_t pt_addr;
+
pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
0, 4096,
PCI_DMA_BIDIRECTIONAL);
@@ -112,8 +111,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
}
ppgtt->pt_dma_addr[i] = pt_addr;
- } else
- pt_addr = page_to_phys(ppgtt->pt_pages[i]);
+ }
}
ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;
@@ -269,7 +267,13 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
BUG();
}
- if (dev_priv->mm.gtt->needs_dmar) {
+ if (obj->sg_table) {
+ i915_ppgtt_insert_sg_entries(ppgtt,
+ obj->sg_table->sgl,
+ obj->sg_table->nents,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ pte_flags);
+ } else if (dev_priv->mm.gtt->needs_dmar) {
BUG_ON(!obj->sg_list);
i915_ppgtt_insert_sg_entries(ppgtt,
@@ -319,7 +323,7 @@ static bool do_idling(struct drm_i915_private *dev_priv)
if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
dev_priv->mm.interruptible = false;
- if (i915_gpu_idle(dev_priv->dev, false)) {
+ if (i915_gpu_idle(dev_priv->dev)) {
DRM_ERROR("Couldn't idle GPU\n");
/* Wait a bit, in hopes it avoids the hang */
udelay(10);
@@ -346,48 +350,39 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
i915_gem_clflush_object(obj);
- i915_gem_gtt_rebind_object(obj, obj->cache_level);
+ i915_gem_gtt_bind_object(obj, obj->cache_level);
}
intel_gtt_chipset_flush();
}
-int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
+int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned int agp_type = cache_level_to_agp_type(dev, obj->cache_level);
- int ret;
-
- if (dev_priv->mm.gtt->needs_dmar) {
- ret = intel_gtt_map_memory(obj->pages,
- obj->base.size >> PAGE_SHIFT,
- &obj->sg_list,
- &obj->num_sg);
- if (ret != 0)
- return ret;
-
- intel_gtt_insert_sg_entries(obj->sg_list,
- obj->num_sg,
- obj->gtt_space->start >> PAGE_SHIFT,
- agp_type);
- } else
- intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT,
- obj->pages,
- agp_type);
- return 0;
+ if (dev_priv->mm.gtt->needs_dmar)
+ return intel_gtt_map_memory(obj->pages,
+ obj->base.size >> PAGE_SHIFT,
+ &obj->sg_list,
+ &obj->num_sg);
+ else
+ return 0;
}
-void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
- enum i915_cache_level cache_level)
+void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
- if (dev_priv->mm.gtt->needs_dmar) {
+ if (obj->sg_table) {
+ intel_gtt_insert_sg_entries(obj->sg_table->sgl,
+ obj->sg_table->nents,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ agp_type);
+ } else if (dev_priv->mm.gtt->needs_dmar) {
BUG_ON(!obj->sg_list);
intel_gtt_insert_sg_entries(obj->sg_list,
@@ -399,19 +394,26 @@ void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
obj->base.size >> PAGE_SHIFT,
obj->pages,
agp_type);
+
+ obj->has_global_gtt_mapping = 1;
}
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
+ intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT);
+
+ obj->has_global_gtt_mapping = 0;
+}
+
+void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
+{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
bool interruptible;
interruptible = do_idling(dev_priv);
- intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT);
-
if (obj->sg_list) {
intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
obj->sg_list = NULL;
@@ -419,3 +421,23 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
undo_idling(dev_priv, interruptible);
}
+
+void i915_gem_init_global_gtt(struct drm_device *dev,
+ unsigned long start,
+ unsigned long mappable_end,
+ unsigned long end)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ /* Subtract the guard page ... */
+ drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
+
+ dev_priv->mm.gtt_start = start;
+ dev_priv->mm.gtt_mappable_end = mappable_end;
+ dev_priv->mm.gtt_end = end;
+ dev_priv->mm.gtt_total = end - start;
+ dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
+
+ /* ... but ensure that we clear the entire range. */
+ intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
+}
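
The resulting address-space layout, illustratively:

    /*  [ start ............................ end - 4096 ) [ guard page ]
     *    drm_mm-managed, handed out to GEM                scratch-bound
     *
     * The allocator never hands out the last page, but the clear_range
     * above still scrubs it so no stale PTE survives there. */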
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
new file mode 100644
index 00000000000..ada2e90a2a6
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright © 2008-2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+/*
+ * The BIOS typically reserves some of the system's memory for the exclusive
+ * use of the integrated graphics. This memory is no longer available for
+ * use by the OS and so the user finds that his system has less memory
+ * available than he put in. We refer to this memory as stolen.
+ *
+ * The BIOS will allocate its framebuffer from the stolen memory. Our
+ * goal is to try to reuse that object for our own fbcon, which must always
+ * be available for panics. Anything else we can reuse the stolen memory
+ * for is a boon.
+ */
+
+#define PTE_ADDRESS_MASK 0xfffff000
+#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
+#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
+#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
+#define PTE_MAPPING_TYPE_CACHED (3 << 1)
+#define PTE_MAPPING_TYPE_MASK (3 << 1)
+#define PTE_VALID (1 << 0)
+
+/**
+ * i915_stolen_to_phys - take an offset into stolen memory and turn it into
+ * a physical one
+ * @dev: drm device
+ * @offset: address to translate
+ *
+ * Some chip functions require allocations from stolen space and need the
+ * physical address of the memory in question.
+ */
+static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = dev_priv->bridge_dev;
+ u32 base;
+
+#if 0
+ /* On the machines I have tested the Graphics Base of Stolen Memory
+ * is unreliable, so compute the base by subtracting the stolen memory
+ * from the Top of Low Usable DRAM which is where the BIOS places
+ * the graphics stolen memory.
+ */
+ if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+ /* top 32bits are reserved = 0 */
+ pci_read_config_dword(pdev, 0xA4, &base);
+ } else {
+ /* XXX presume 8xx is the same as i915 */
+ pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
+ }
+#else
+ if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+ u16 val;
+ pci_read_config_word(pdev, 0xb0, &val);
+ base = val >> 4 << 20;
+ } else {
+ u8 val;
+ pci_read_config_byte(pdev, 0x9c, &val);
+ base = val >> 3 << 27;
+ }
+ base -= dev_priv->mm.gtt->stolen_size;
+#endif
+
+ return base + offset;
+}
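
A worked example for the gen4+/G33 branch, with illustrative values:

    /* Say the config word at 0xb0 reads 0x07f0:
     *   base = (0x07f0 >> 4) << 20 = 127 << 20   ->  127MiB
     *   base -= stolen_size (e.g. 8MiB)          ->  119MiB
     * i915_stolen_to_phys(dev, offset) then returns 119MiB + offset. */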
+
+static void i915_warn_stolen(struct drm_device *dev)
+{
+ DRM_INFO("not enough stolen space for compressed buffer, disabling\n");
+ DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
+}
+
+static void i915_setup_compression(struct drm_device *dev, int size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
+ unsigned long cfb_base;
+ unsigned long ll_base = 0;
+
+ /* Just in case the BIOS is doing something questionable. */
+ intel_disable_fbc(dev);
+
+ compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
+ if (compressed_fb)
+ compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
+ if (!compressed_fb)
+ goto err;
+
+ cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
+ if (!cfb_base)
+ goto err_fb;
+
+ if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
+ compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
+ 4096, 4096, 0);
+ if (compressed_llb)
+ compressed_llb = drm_mm_get_block(compressed_llb,
+ 4096, 4096);
+ if (!compressed_llb)
+ goto err_fb;
+
+ ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
+ if (!ll_base)
+ goto err_llb;
+ }
+
+ dev_priv->cfb_size = size;
+
+ dev_priv->compressed_fb = compressed_fb;
+ if (HAS_PCH_SPLIT(dev))
+ I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
+ else if (IS_GM45(dev)) {
+ I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
+ } else {
+ I915_WRITE(FBC_CFB_BASE, cfb_base);
+ I915_WRITE(FBC_LL_BASE, ll_base);
+ dev_priv->compressed_llb = compressed_llb;
+ }
+
+ DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
+ cfb_base, ll_base, size >> 20);
+ return;
+
+err_llb:
+ drm_mm_put_block(compressed_llb);
+err_fb:
+ drm_mm_put_block(compressed_fb);
+err:
+ dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+ i915_warn_stolen(dev);
+}
+
+static void i915_cleanup_compression(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ drm_mm_put_block(dev_priv->compressed_fb);
+ if (dev_priv->compressed_llb)
+ drm_mm_put_block(dev_priv->compressed_llb);
+}
+
+void i915_gem_cleanup_stolen(struct drm_device *dev)
+{
+ if (I915_HAS_FBC(dev) && i915_powersave)
+ i915_cleanup_compression(dev);
+}
+
+int i915_gem_init_stolen(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long prealloc_size = dev_priv->mm.gtt->stolen_size;
+
+ /* Basic memrange allocator for stolen space */
+ drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
+
+ /* Try to set up FBC with a reasonable compressed buffer size */
+ if (I915_HAS_FBC(dev) && i915_powersave) {
+ int cfb_size;
+
+ /* Leave 1M for line length buffer & misc. */
+
+ /* Try to get a 32M buffer... */
+ if (prealloc_size > (36*1024*1024))
+ cfb_size = 32*1024*1024;
+ else /* fall back to 7/8 of the stolen space */
+ cfb_size = prealloc_size * 7 / 8;
+ i915_setup_compression(dev, cfb_size);
+ }
+
+ return 0;
+}
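
Two sizing examples for the compressed framebuffer:

    /* 64MiB stolen: 64 > 36, so the CFB gets the fixed 32MiB;
     * 32MiB stolen: fall back to 7/8, i.e. 28MiB;
     * both leave headroom for the line-length buffer and misc. */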
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 1a930666598..b964df51cec 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -354,9 +354,15 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
/* We need to rebind the object if its current allocation
* no longer meets the alignment restrictions for its new
* tiling mode. Otherwise we can just leave it alone, but
- * need to ensure that any fence register is cleared.
+ * need to ensure that any fence register is updated before
+ * the next fenced (either through the GTT or by the BLT unit
+ * on older GPUs) access.
+ *
+ * After updating the tiling parameters, we then flag whether
+ * we need to update an associated fence register. Note this
+ * has to also include the unfenced register the GPU uses
+ * whilst executing a fenced command for an untiled object.
*/
- i915_gem_release_mmap(obj);
obj->map_and_fenceable =
obj->gtt_space == NULL ||
@@ -374,9 +380,15 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
}
if (ret == 0) {
- obj->tiling_changed = true;
+ obj->fence_dirty =
+ obj->fenced_gpu_access ||
+ obj->fence_reg != I915_FENCE_REG_NONE;
+
obj->tiling_mode = args->tiling_mode;
obj->stride = args->stride;
+
+ /* Force the fence to be reacquired for GTT access */
+ i915_gem_release_mmap(obj);
}
}
/* we have to maintain this existing ABI... */
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 13b028994b2..0e72abb9f70 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -34,6 +34,7 @@
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
+#include "i915_drv.h"
typedef struct _drm_i915_batchbuffer32 {
int start; /* agp offset */
@@ -181,7 +182,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
(unsigned long)request);
}
-drm_ioctl_compat_t *i915_compat_ioctls[] = {
+static drm_ioctl_compat_t *i915_compat_ioctls[] = {
[DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
[DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
[DRM_I915_GETPARAM] = compat_i915_getparam,
@@ -189,6 +190,7 @@ drm_ioctl_compat_t *i915_compat_ioctls[] = {
[DRM_I915_ALLOC] = compat_i915_alloc
};
+#ifdef CONFIG_COMPAT
/**
* Called whenever a 32-bit process running under a 64-bit kernel
* performs an ioctl on /dev/dri/card<n>.
@@ -217,3 +219,4 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return ret;
}
+#endif
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index afd4e03e337..cc4a6330761 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -26,6 +26,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/sysrq.h>
#include <linux/slab.h>
#include "drmP.h"
@@ -35,35 +37,6 @@
#include "i915_trace.h"
#include "intel_drv.h"
-#define MAX_NOPID ((u32)~0)
-
-/**
- * Interrupts that are always left unmasked.
- *
- * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
- * we leave them always unmasked in IMR and then control enabling them through
- * PIPESTAT alone.
- */
-#define I915_INTERRUPT_ENABLE_FIX \
- (I915_ASLE_INTERRUPT | \
- I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
- I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | \
- I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | \
- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
-
-/** Interrupts that we mask and unmask at runtime. */
-#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)
-
-#define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\
- PIPE_VBLANK_INTERRUPT_STATUS)
-
-#define I915_PIPE_VBLANK_ENABLE (PIPE_START_VBLANK_INTERRUPT_ENABLE |\
- PIPE_VBLANK_INTERRUPT_ENABLE)
-
-#define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \
- DRM_I915_VBLANK_PIPE_B)
-
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
@@ -118,6 +91,10 @@ void intel_enable_asle(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
unsigned long irqflags;
+ /* FIXME: opregion/asle for VLV */
+ if (IS_VALLEYVIEW(dev))
+ return;
+
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (HAS_PCH_SPLIT(dev))
@@ -354,15 +331,12 @@ static void notify_ring(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 seqno;
if (ring->obj == NULL)
return;
- seqno = ring->get_seqno(ring);
- trace_i915_gem_request_complete(ring, seqno);
+ trace_i915_gem_request_complete(ring, ring->get_seqno(ring));
- ring->irq_seqno = seqno;
wake_up_all(&ring->irq_queue);
if (i915_enable_hangcheck) {
dev_priv->hangcheck_count = 0;
@@ -424,13 +398,145 @@ static void gen6_pm_rps_work(struct work_struct *work)
mutex_unlock(&dev_priv->dev->struct_mutex);
}
-static void pch_irq_handler(struct drm_device *dev)
+static void snb_gt_irq_handler(struct drm_device *dev,
+ struct drm_i915_private *dev_priv,
+ u32 gt_iir)
+{
+
+ if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
+ GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
+ notify_ring(dev, &dev_priv->ring[RCS]);
+ if (gt_iir & GEN6_BSD_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[VCS]);
+ if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[BCS]);
+
+ if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
+ GT_GEN6_BSD_CS_ERROR_INTERRUPT |
+ GT_RENDER_CS_ERROR_INTERRUPT)) {
+ DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
+ i915_handle_error(dev, false);
+ }
+}
+
+static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
+ u32 pm_iir)
+{
+ unsigned long flags;
+
+ /*
+ * IIR bits should never already be set because IMR should
+ * prevent an interrupt from being shown in IIR. The warning
+ * displays a case where we've unsafely cleared
+ * dev_priv->pm_iir. Although missing an interrupt of the same
+ * type is not a problem, it indicates a problem in the logic.
+ *
+ * The mask bit in IMR is cleared by rps_work.
+ */
+
+ spin_lock_irqsave(&dev_priv->rps_lock, flags);
+ WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
+ dev_priv->pm_iir |= pm_iir;
+ I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
+ POSTING_READ(GEN6_PMIMR);
+ spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
+
+ queue_work(dev_priv->wq, &dev_priv->rps_work);
+}
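
The helper's masking discipline, spelled out:

    /* Ordering note: the event bit is latched into dev_priv->pm_iir and
     * masked in GEN6_PMIMR *before* the work is queued, so the same PM
     * interrupt cannot storm; gen6_pm_rps_work unmasks it again once it
     * has consumed dev_priv->pm_iir under rps_lock. */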
+
+static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
{
+ struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 pch_iir;
+ u32 iir, gt_iir, pm_iir;
+ irqreturn_t ret = IRQ_NONE;
+ unsigned long irqflags;
int pipe;
+ u32 pipe_stats[I915_MAX_PIPES];
+ u32 vblank_status;
+ int vblank = 0;
+ bool blc_event;
- pch_iir = I915_READ(SDEIIR);
+ atomic_inc(&dev_priv->irq_received);
+
+ vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS |
+ PIPE_VBLANK_INTERRUPT_STATUS;
+
+ while (true) {
+ iir = I915_READ(VLV_IIR);
+ gt_iir = I915_READ(GTIIR);
+ pm_iir = I915_READ(GEN6_PMIIR);
+
+ if (gt_iir == 0 && pm_iir == 0 && iir == 0)
+ goto out;
+
+ ret = IRQ_HANDLED;
+
+ snb_gt_irq_handler(dev, dev_priv, gt_iir);
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ for_each_pipe(pipe) {
+ int reg = PIPESTAT(pipe);
+ pipe_stats[pipe] = I915_READ(reg);
+
+ /*
+ * Clear the PIPE*STAT regs before the IIR
+ */
+ if (pipe_stats[pipe] & 0x8000ffff) {
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ DRM_DEBUG_DRIVER("pipe %c underrun\n",
+ pipe_name(pipe));
+ I915_WRITE(reg, pipe_stats[pipe]);
+ }
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ /* Consume port. Then clear IIR or we'll miss events */
+ if (iir & I915_DISPLAY_PORT_INTERRUPT) {
+ u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+
+ DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
+ hotplug_status);
+ if (hotplug_status & dev_priv->hotplug_supported_mask)
+ queue_work(dev_priv->wq,
+ &dev_priv->hotplug_work);
+
+ I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+ I915_READ(PORT_HOTPLUG_STAT);
+ }
+
+ if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) {
+ drm_handle_vblank(dev, 0);
+ vblank++;
+ intel_finish_page_flip(dev, 0);
+ }
+
+ if (iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) {
+ drm_handle_vblank(dev, 1);
+ vblank++;
+ intel_finish_page_flip(dev, 1);
+ }
+
+ if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
+ blc_event = true;
+
+ if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
+ gen6_queue_rps_work(dev_priv, pm_iir);
+
+ I915_WRITE(GTIIR, gt_iir);
+ I915_WRITE(GEN6_PMIIR, pm_iir);
+ I915_WRITE(VLV_IIR, iir);
+ }
+
+out:
+ return ret;
+}
+
+static void pch_irq_handler(struct drm_device *dev, u32 pch_iir)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
if (pch_iir & SDE_AUDIO_POWER_MASK)
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
@@ -471,91 +577,77 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- int ret = IRQ_NONE;
- u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
- struct drm_i915_master_private *master_priv;
+ u32 de_iir, gt_iir, de_ier, pm_iir;
+ irqreturn_t ret = IRQ_NONE;
+ int i;
atomic_inc(&dev_priv->irq_received);
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
- POSTING_READ(DEIER);
- de_iir = I915_READ(DEIIR);
gt_iir = I915_READ(GTIIR);
- pch_iir = I915_READ(SDEIIR);
- pm_iir = I915_READ(GEN6_PMIIR);
-
- if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && pm_iir == 0)
- goto done;
-
- ret = IRQ_HANDLED;
-
- if (dev->primary->master) {
- master_priv = dev->primary->master->driver_priv;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_dispatch =
- READ_BREADCRUMB(dev_priv);
+ if (gt_iir) {
+ snb_gt_irq_handler(dev, dev_priv, gt_iir);
+ I915_WRITE(GTIIR, gt_iir);
+ ret = IRQ_HANDLED;
}
- if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
- notify_ring(dev, &dev_priv->ring[RCS]);
- if (gt_iir & GT_GEN6_BSD_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[VCS]);
- if (gt_iir & GT_BLT_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[BCS]);
-
- if (de_iir & DE_GSE_IVB)
- intel_opregion_gse_intr(dev);
-
- if (de_iir & DE_PLANEA_FLIP_DONE_IVB) {
- intel_prepare_page_flip(dev, 0);
- intel_finish_page_flip_plane(dev, 0);
- }
+ de_iir = I915_READ(DEIIR);
+ if (de_iir) {
+ if (de_iir & DE_GSE_IVB)
+ intel_opregion_gse_intr(dev);
+
+ for (i = 0; i < 3; i++) {
+ if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
+ intel_prepare_page_flip(dev, i);
+ intel_finish_page_flip_plane(dev, i);
+ }
+ if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
+ drm_handle_vblank(dev, i);
+ }
- if (de_iir & DE_PLANEB_FLIP_DONE_IVB) {
- intel_prepare_page_flip(dev, 1);
- intel_finish_page_flip_plane(dev, 1);
- }
+ /* check event from PCH */
+ if (de_iir & DE_PCH_EVENT_IVB) {
+ u32 pch_iir = I915_READ(SDEIIR);
- if (de_iir & DE_PIPEA_VBLANK_IVB)
- drm_handle_vblank(dev, 0);
+ if (pch_iir & SDE_HOTPLUG_MASK_CPT)
+ queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+ pch_irq_handler(dev, pch_iir);
- if (de_iir & DE_PIPEB_VBLANK_IVB)
- drm_handle_vblank(dev, 1);
+ /* clear PCH hotplug event before clearing the CPU irq */
+ I915_WRITE(SDEIIR, pch_iir);
+ }
- /* check event from PCH */
- if (de_iir & DE_PCH_EVENT_IVB) {
- if (pch_iir & SDE_HOTPLUG_MASK_CPT)
- queue_work(dev_priv->wq, &dev_priv->hotplug_work);
- pch_irq_handler(dev);
+ I915_WRITE(DEIIR, de_iir);
+ ret = IRQ_HANDLED;
}
- if (pm_iir & GEN6_PM_DEFERRED_EVENTS) {
- unsigned long flags;
- spin_lock_irqsave(&dev_priv->rps_lock, flags);
- WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
- dev_priv->pm_iir |= pm_iir;
- I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
- POSTING_READ(GEN6_PMIMR);
- spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
- queue_work(dev_priv->wq, &dev_priv->rps_work);
+ pm_iir = I915_READ(GEN6_PMIIR);
+ if (pm_iir) {
+ if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
+ gen6_queue_rps_work(dev_priv, pm_iir);
+ I915_WRITE(GEN6_PMIIR, pm_iir);
+ ret = IRQ_HANDLED;
}
- /* should clear PCH hotplug event before clear CPU irq */
- I915_WRITE(SDEIIR, pch_iir);
- I915_WRITE(GTIIR, gt_iir);
- I915_WRITE(DEIIR, de_iir);
- I915_WRITE(GEN6_PMIIR, pm_iir);
-
-done:
I915_WRITE(DEIER, de_ier);
POSTING_READ(DEIER);
return ret;
}
+static void ilk_gt_irq_handler(struct drm_device *dev,
+ struct drm_i915_private *dev_priv,
+ u32 gt_iir)
+{
+ if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
+ notify_ring(dev, &dev_priv->ring[RCS]);
+ if (gt_iir & GT_BSD_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[VCS]);
+}
+
static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
@@ -563,14 +655,9 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
int ret = IRQ_NONE;
u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
u32 hotplug_mask;
- struct drm_i915_master_private *master_priv;
- u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
atomic_inc(&dev_priv->irq_received);
- if (IS_GEN6(dev))
- bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
-
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
@@ -592,19 +679,10 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
ret = IRQ_HANDLED;
- if (dev->primary->master) {
- master_priv = dev->primary->master->driver_priv;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_dispatch =
- READ_BREADCRUMB(dev_priv);
- }
-
- if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
- notify_ring(dev, &dev_priv->ring[RCS]);
- if (gt_iir & bsd_usr_interrupt)
- notify_ring(dev, &dev_priv->ring[VCS]);
- if (gt_iir & GT_BLT_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[BCS]);
+ if (IS_GEN5(dev))
+ ilk_gt_irq_handler(dev, dev_priv, gt_iir);
+ else
+ snb_gt_irq_handler(dev, dev_priv, gt_iir);
if (de_iir & DE_GSE)
intel_opregion_gse_intr(dev);
@@ -629,7 +707,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
if (de_iir & DE_PCH_EVENT) {
if (pch_iir & hotplug_mask)
queue_work(dev_priv->wq, &dev_priv->hotplug_work);
- pch_irq_handler(dev);
+ pch_irq_handler(dev, pch_iir);
}
if (de_iir & DE_PCU_EVENT) {
@@ -637,25 +715,8 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
i915_handle_rps_change(dev);
}
- if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) {
- /*
- * IIR bits should never already be set because IMR should
- * prevent an interrupt from being shown in IIR. The warning
- * displays a case where we've unsafely cleared
- * dev_priv->pm_iir. Although missing an interrupt of the same
- * type is not a problem, it displays a problem in the logic.
- *
- * The mask bit in IMR is cleared by rps_work.
- */
- unsigned long flags;
- spin_lock_irqsave(&dev_priv->rps_lock, flags);
- WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
- dev_priv->pm_iir |= pm_iir;
- I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
- POSTING_READ(GEN6_PMIMR);
- spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
- queue_work(dev_priv->wq, &dev_priv->rps_work);
- }
+ if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
+ gen6_queue_rps_work(dev_priv, pm_iir);
/* should clear PCH hotplug event before clear CPU irq */
I915_WRITE(SDEIIR, pch_iir);
@@ -691,7 +752,7 @@ static void i915_error_work_func(struct work_struct *work)
if (atomic_read(&dev_priv->mm.wedged)) {
DRM_DEBUG_DRIVER("resetting chip\n");
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
- if (!i915_reset(dev, GRDOM_RENDER)) {
+ if (!i915_reset(dev)) {
atomic_set(&dev_priv->mm.wedged, 0);
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
}
@@ -727,7 +788,8 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
goto unwind;
local_irq_save(flags);
- if (reloc_offset < dev_priv->mm.gtt_mappable_end) {
+ if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
+ src->has_global_gtt_mapping) {
void __iomem *s;
/* Simply ignore tiling or any overlapping fence.
@@ -782,10 +844,11 @@ i915_error_object_free(struct drm_i915_error_object *obj)
kfree(obj);
}
-static void
-i915_error_state_free(struct drm_device *dev,
- struct drm_i915_error_state *error)
+void
+i915_error_state_free(struct kref *error_ref)
{
+ struct drm_i915_error_state *error = container_of(error_ref,
+ typeof(*error), ref);
int i;
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
@@ -798,37 +861,56 @@ i915_error_state_free(struct drm_device *dev,
kfree(error->overlay);
kfree(error);
}
+static void capture_bo(struct drm_i915_error_buffer *err,
+ struct drm_i915_gem_object *obj)
+{
+ err->size = obj->base.size;
+ err->name = obj->base.name;
+ err->seqno = obj->last_rendering_seqno;
+ err->gtt_offset = obj->gtt_offset;
+ err->read_domains = obj->base.read_domains;
+ err->write_domain = obj->base.write_domain;
+ err->fence_reg = obj->fence_reg;
+ err->pinned = 0;
+ if (obj->pin_count > 0)
+ err->pinned = 1;
+ if (obj->user_pin_count > 0)
+ err->pinned = -1;
+ err->tiling = obj->tiling_mode;
+ err->dirty = obj->dirty;
+ err->purgeable = obj->madv != I915_MADV_WILLNEED;
+ err->ring = obj->ring ? obj->ring->id : -1;
+ err->cache_level = obj->cache_level;
+}
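
The pinned field packs three states into one integer:

    /* err->pinned is a tri-state snapshot for the error dump:
     *   1  pinned by the kernel   (pin_count > 0)
     *  -1  pinned by userspace    (user_pin_count > 0, takes precedence)
     *   0  not pinned */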
-static u32 capture_bo_list(struct drm_i915_error_buffer *err,
- int count,
- struct list_head *head)
+static u32 capture_active_bo(struct drm_i915_error_buffer *err,
+ int count, struct list_head *head)
{
struct drm_i915_gem_object *obj;
int i = 0;
list_for_each_entry(obj, head, mm_list) {
- err->size = obj->base.size;
- err->name = obj->base.name;
- err->seqno = obj->last_rendering_seqno;
- err->gtt_offset = obj->gtt_offset;
- err->read_domains = obj->base.read_domains;
- err->write_domain = obj->base.write_domain;
- err->fence_reg = obj->fence_reg;
- err->pinned = 0;
- if (obj->pin_count > 0)
- err->pinned = 1;
- if (obj->user_pin_count > 0)
- err->pinned = -1;
- err->tiling = obj->tiling_mode;
- err->dirty = obj->dirty;
- err->purgeable = obj->madv != I915_MADV_WILLNEED;
- err->ring = obj->ring ? obj->ring->id : -1;
- err->cache_level = obj->cache_level;
-
+ capture_bo(err++, obj);
if (++i == count)
break;
+ }
- err++;
+ return i;
+}
+
+static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
+ int count, struct list_head *head)
+{
+ struct drm_i915_gem_object *obj;
+ int i = 0;
+
+ list_for_each_entry(obj, head, gtt_list) {
+ if (obj->pin_count == 0)
+ continue;
+
+ capture_bo(err++, obj);
+ if (++i == count)
+ break;
}
return i;
@@ -901,7 +983,6 @@ static void i915_record_ring_state(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
if (INTEL_INFO(dev)->gen >= 6) {
- error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
error->semaphore_mboxes[ring->id][0]
= I915_READ(RING_SYNC_0(ring->mmio_base));
@@ -910,6 +991,7 @@ static void i915_record_ring_state(struct drm_device *dev,
}
if (INTEL_INFO(dev)->gen >= 4) {
+ error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
@@ -919,11 +1001,13 @@ static void i915_record_ring_state(struct drm_device *dev,
error->bbaddr = I915_READ64(BB_ADDR);
}
} else {
+ error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
error->ipeir[ring->id] = I915_READ(IPEIR);
error->ipehr[ring->id] = I915_READ(IPEHR);
error->instdone[ring->id] = I915_READ(INSTDONE);
}
+ error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
error->seqno[ring->id] = ring->get_seqno(ring);
error->acthd[ring->id] = intel_ring_get_active_head(ring);
@@ -938,15 +1022,11 @@ static void i915_gem_record_rings(struct drm_device *dev,
struct drm_i915_error_state *error)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
struct drm_i915_gem_request *request;
int i, count;
- for (i = 0; i < I915_NUM_RINGS; i++) {
- struct intel_ring_buffer *ring = &dev_priv->ring[i];
-
- if (ring->obj == NULL)
- continue;
-
+ for_each_ring(ring, dev_priv, i) {
i915_record_ring_state(dev, error, ring);
error->ring[i].batchbuffer =
@@ -1013,8 +1093,19 @@ static void i915_capture_error_state(struct drm_device *dev)
DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
dev->primary->index);
+ kref_init(&error->ref);
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
+
+ if (HAS_PCH_SPLIT(dev))
+ error->ier = I915_READ(DEIER) | I915_READ(GTIER);
+ else if (IS_VALLEYVIEW(dev))
+ error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
+ else if (IS_GEN2(dev))
+ error->ier = I915_READ16(IER);
+ else
+ error->ier = I915_READ(IER);
+
for_each_pipe(pipe)
error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
@@ -1034,8 +1125,9 @@ static void i915_capture_error_state(struct drm_device *dev)
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
i++;
error->active_bo_count = i;
- list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
- i++;
+ list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
+ if (obj->pin_count)
+ i++;
error->pinned_bo_count = i - error->active_bo_count;
error->active_bo = NULL;
@@ -1050,15 +1142,15 @@ static void i915_capture_error_state(struct drm_device *dev)
if (error->active_bo)
error->active_bo_count =
- capture_bo_list(error->active_bo,
- error->active_bo_count,
- &dev_priv->mm.active_list);
+ capture_active_bo(error->active_bo,
+ error->active_bo_count,
+ &dev_priv->mm.active_list);
if (error->pinned_bo)
error->pinned_bo_count =
- capture_bo_list(error->pinned_bo,
- error->pinned_bo_count,
- &dev_priv->mm.pinned_list);
+ capture_pinned_bo(error->pinned_bo,
+ error->pinned_bo_count,
+ &dev_priv->mm.gtt_list);
do_gettimeofday(&error->time);
@@ -1073,7 +1165,7 @@ static void i915_capture_error_state(struct drm_device *dev)
spin_unlock_irqrestore(&dev_priv->error_lock, flags);
if (error)
- i915_error_state_free(dev, error);
+ i915_error_state_free(&error->ref);
}
void i915_destroy_error_state(struct drm_device *dev)
@@ -1088,7 +1180,7 @@ void i915_destroy_error_state(struct drm_device *dev)
spin_unlock_irqrestore(&dev_priv->error_lock, flags);
if (error)
- i915_error_state_free(dev, error);
+ kref_put(&error->ref, i915_error_state_free);
}
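The error state is now reference counted so that a debugfs reader can keep it alive while new errors arrive. A minimal sketch of the release callback assumed by the kref_put() above (the struct gains the struct kref ref member initialised by the kref_init() in the capture path):

	static void i915_error_state_free(struct kref *error_ref)
	{
		struct drm_i915_error_state *error = container_of(error_ref,
								  typeof(*error), ref);
		/* free the captured batch/ring copies, then the state itself */
		kfree(error);
	}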
#else
#define i915_capture_error_state(x)
@@ -1103,33 +1195,26 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
if (!eir)
return;
- printk(KERN_ERR "render error detected, EIR: 0x%08x\n",
- eir);
+ pr_err("render error detected, EIR: 0x%08x\n", eir);
if (IS_G4X(dev)) {
if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
u32 ipeir = I915_READ(IPEIR_I965);
- printk(KERN_ERR " IPEIR: 0x%08x\n",
- I915_READ(IPEIR_I965));
- printk(KERN_ERR " IPEHR: 0x%08x\n",
- I915_READ(IPEHR_I965));
- printk(KERN_ERR " INSTDONE: 0x%08x\n",
+ pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
+ pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
+ pr_err(" INSTDONE: 0x%08x\n",
I915_READ(INSTDONE_I965));
- printk(KERN_ERR " INSTPS: 0x%08x\n",
- I915_READ(INSTPS));
- printk(KERN_ERR " INSTDONE1: 0x%08x\n",
- I915_READ(INSTDONE1));
- printk(KERN_ERR " ACTHD: 0x%08x\n",
- I915_READ(ACTHD_I965));
+ pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
+ pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
+ pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
I915_WRITE(IPEIR_I965, ipeir);
POSTING_READ(IPEIR_I965);
}
if (eir & GM45_ERROR_PAGE_TABLE) {
u32 pgtbl_err = I915_READ(PGTBL_ER);
- printk(KERN_ERR "page table error\n");
- printk(KERN_ERR " PGTBL_ER: 0x%08x\n",
- pgtbl_err);
+ pr_err("page table error\n");
+ pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
I915_WRITE(PGTBL_ER, pgtbl_err);
POSTING_READ(PGTBL_ER);
}
@@ -1138,53 +1223,42 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
if (!IS_GEN2(dev)) {
if (eir & I915_ERROR_PAGE_TABLE) {
u32 pgtbl_err = I915_READ(PGTBL_ER);
- printk(KERN_ERR "page table error\n");
- printk(KERN_ERR " PGTBL_ER: 0x%08x\n",
- pgtbl_err);
+ pr_err("page table error\n");
+ pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
I915_WRITE(PGTBL_ER, pgtbl_err);
POSTING_READ(PGTBL_ER);
}
}
if (eir & I915_ERROR_MEMORY_REFRESH) {
- printk(KERN_ERR "memory refresh error:\n");
+ pr_err("memory refresh error:\n");
for_each_pipe(pipe)
- printk(KERN_ERR "pipe %c stat: 0x%08x\n",
+ pr_err("pipe %c stat: 0x%08x\n",
pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
/* pipestat has already been acked */
}
if (eir & I915_ERROR_INSTRUCTION) {
- printk(KERN_ERR "instruction error\n");
- printk(KERN_ERR " INSTPM: 0x%08x\n",
- I915_READ(INSTPM));
+ pr_err("instruction error\n");
+ pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
if (INTEL_INFO(dev)->gen < 4) {
u32 ipeir = I915_READ(IPEIR);
- printk(KERN_ERR " IPEIR: 0x%08x\n",
- I915_READ(IPEIR));
- printk(KERN_ERR " IPEHR: 0x%08x\n",
- I915_READ(IPEHR));
- printk(KERN_ERR " INSTDONE: 0x%08x\n",
- I915_READ(INSTDONE));
- printk(KERN_ERR " ACTHD: 0x%08x\n",
- I915_READ(ACTHD));
+ pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
+ pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
+ pr_err(" INSTDONE: 0x%08x\n", I915_READ(INSTDONE));
+ pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
I915_WRITE(IPEIR, ipeir);
POSTING_READ(IPEIR);
} else {
u32 ipeir = I915_READ(IPEIR_I965);
- printk(KERN_ERR " IPEIR: 0x%08x\n",
- I915_READ(IPEIR_I965));
- printk(KERN_ERR " IPEHR: 0x%08x\n",
- I915_READ(IPEHR_I965));
- printk(KERN_ERR " INSTDONE: 0x%08x\n",
+ pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
+ pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
+ pr_err(" INSTDONE: 0x%08x\n",
I915_READ(INSTDONE_I965));
- printk(KERN_ERR " INSTPS: 0x%08x\n",
- I915_READ(INSTPS));
- printk(KERN_ERR " INSTDONE1: 0x%08x\n",
- I915_READ(INSTDONE1));
- printk(KERN_ERR " ACTHD: 0x%08x\n",
- I915_READ(ACTHD_I965));
+ pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
+ pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
+ pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
I915_WRITE(IPEIR_I965, ipeir);
POSTING_READ(IPEIR_I965);
}
@@ -1217,6 +1291,8 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
void i915_handle_error(struct drm_device *dev, bool wedged)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ int i;
i915_capture_error_state(dev);
i915_report_and_clear_eir(dev);
@@ -1228,11 +1304,8 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
/*
* Wakeup waiting processes so they don't hang
*/
- wake_up_all(&dev_priv->ring[RCS].irq_queue);
- if (HAS_BSD(dev))
- wake_up_all(&dev_priv->ring[VCS].irq_queue);
- if (HAS_BLT(dev))
- wake_up_all(&dev_priv->ring[BCS].irq_queue);
+ for_each_ring(ring, dev_priv, i)
+ wake_up_all(&ring->irq_queue);
}
queue_work(dev_priv->wq, &dev_priv->error_work);
@@ -1265,7 +1338,8 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
obj = work->pending_flip_obj;
if (INTEL_INFO(dev)->gen >= 4) {
int dspsurf = DSPSURF(intel_crtc->plane);
- stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
+ stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
+ obj->gtt_offset;
} else {
int dspaddr = DSPADDR(intel_crtc->plane);
stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
@@ -1281,248 +1355,6 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
}
}
-static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
-{
- struct drm_device *dev = (struct drm_device *) arg;
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- struct drm_i915_master_private *master_priv;
- u32 iir, new_iir;
- u32 pipe_stats[I915_MAX_PIPES];
- u32 vblank_status;
- int vblank = 0;
- unsigned long irqflags;
- int irq_received;
- int ret = IRQ_NONE, pipe;
- bool blc_event = false;
-
- atomic_inc(&dev_priv->irq_received);
-
- iir = I915_READ(IIR);
-
- if (INTEL_INFO(dev)->gen >= 4)
- vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
- else
- vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;
-
- for (;;) {
- irq_received = iir != 0;
-
- /* Can't rely on pipestat interrupt bit in iir as it might
- * have been cleared after the pipestat interrupt was received.
- * It doesn't set the bit in iir again, but it still produces
- * interrupts (for non-MSI).
- */
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- i915_handle_error(dev, false);
-
- for_each_pipe(pipe) {
- int reg = PIPESTAT(pipe);
- pipe_stats[pipe] = I915_READ(reg);
-
- /*
- * Clear the PIPE*STAT regs before the IIR
- */
- if (pipe_stats[pipe] & 0x8000ffff) {
- if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
- DRM_DEBUG_DRIVER("pipe %c underrun\n",
- pipe_name(pipe));
- I915_WRITE(reg, pipe_stats[pipe]);
- irq_received = 1;
- }
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-
- if (!irq_received)
- break;
-
- ret = IRQ_HANDLED;
-
- /* Consume port. Then clear IIR or we'll miss events */
- if ((I915_HAS_HOTPLUG(dev)) &&
- (iir & I915_DISPLAY_PORT_INTERRUPT)) {
- u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
-
- DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
- hotplug_status);
- if (hotplug_status & dev_priv->hotplug_supported_mask)
- queue_work(dev_priv->wq,
- &dev_priv->hotplug_work);
-
- I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
- I915_READ(PORT_HOTPLUG_STAT);
- }
-
- I915_WRITE(IIR, iir);
- new_iir = I915_READ(IIR); /* Flush posted writes */
-
- if (dev->primary->master) {
- master_priv = dev->primary->master->driver_priv;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_dispatch =
- READ_BREADCRUMB(dev_priv);
- }
-
- if (iir & I915_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[RCS]);
- if (iir & I915_BSD_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[VCS]);
-
- if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
- intel_prepare_page_flip(dev, 0);
- if (dev_priv->flip_pending_is_done)
- intel_finish_page_flip_plane(dev, 0);
- }
-
- if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
- intel_prepare_page_flip(dev, 1);
- if (dev_priv->flip_pending_is_done)
- intel_finish_page_flip_plane(dev, 1);
- }
-
- for_each_pipe(pipe) {
- if (pipe_stats[pipe] & vblank_status &&
- drm_handle_vblank(dev, pipe)) {
- vblank++;
- if (!dev_priv->flip_pending_is_done) {
- i915_pageflip_stall_check(dev, pipe);
- intel_finish_page_flip(dev, pipe);
- }
- }
-
- if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
- blc_event = true;
- }
-
-
- if (blc_event || (iir & I915_ASLE_INTERRUPT))
- intel_opregion_asle_intr(dev);
-
- /* With MSI, interrupts are only generated when iir
- * transitions from zero to nonzero. If another bit got
- * set while we were handling the existing iir bits, then
- * we would never get another interrupt.
- *
- * This is fine on non-MSI as well, as if we hit this path
- * we avoid exiting the interrupt handler only to generate
- * another one.
- *
- * Note that for MSI this could cause a stray interrupt report
- * if an interrupt landed in the time between writing IIR and
- * the posting read. This should be rare enough to never
- * trigger the 99% of 100,000 interrupts test for disabling
- * stray interrupts.
- */
- iir = new_iir;
- }
-
- return ret;
-}
-
-static int i915_emit_irq(struct drm_device * dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-
- i915_kernel_lost_context(dev);
-
- DRM_DEBUG_DRIVER("\n");
-
- dev_priv->counter++;
- if (dev_priv->counter > 0x7FFFFFFFUL)
- dev_priv->counter = 1;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_enqueue = dev_priv->counter;
-
- if (BEGIN_LP_RING(4) == 0) {
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
- OUT_RING(MI_USER_INTERRUPT);
- ADVANCE_LP_RING();
- }
-
- return dev_priv->counter;
-}
-
-static int i915_wait_irq(struct drm_device * dev, int irq_nr)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- int ret = 0;
- struct intel_ring_buffer *ring = LP_RING(dev_priv);
-
- DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
- READ_BREADCRUMB(dev_priv));
-
- if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
- return 0;
- }
-
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
-
- if (ring->irq_get(ring)) {
- DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
- READ_BREADCRUMB(dev_priv) >= irq_nr);
- ring->irq_put(ring);
- } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
- ret = -EBUSY;
-
- if (ret == -EBUSY) {
- DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
- READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
- }
-
- return ret;
-}
-
-/* Needs the lock as it touches the ring.
- */
-int i915_irq_emit(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_irq_emit_t *emit = data;
- int result;
-
- if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- mutex_lock(&dev->struct_mutex);
- result = i915_emit_irq(dev);
- mutex_unlock(&dev->struct_mutex);
-
- if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
- DRM_ERROR("copy_to_user\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
-/* Doesn't need the hardware lock.
- */
-int i915_irq_wait(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_irq_wait_t *irqwait = data;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- return i915_wait_irq(dev, irqwait->irq_seq);
-}
-
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
@@ -1544,7 +1376,7 @@ static int i915_enable_vblank(struct drm_device *dev, int pipe)
/* maintain vblank delivery even in deep C-states */
if (dev_priv->info->gen == 3)
- I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16);
+ I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
@@ -1575,8 +1407,34 @@ static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
return -EINVAL;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
- DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB);
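+	/* The IVB display engine repeats its per-pipe interrupt bits every
+	 * 5 bit positions, so shifting the pipe A bit by 5*pipe selects
+	 * pipe A, B or C.
+	 */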
+ ironlake_enable_display_irq(dev_priv,
+ DE_PIPEA_VBLANK_IVB << (5 * pipe));
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ return 0;
+}
+
+static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
+ u32 dpfl, imr;
+
+ if (!i915_pipe_enabled(dev, pipe))
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
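+	/* Vblank delivery on VLV is gated twice: by the per-pipe enable bit
+	 * in VLV_DPFLIPSTAT and by the mask bit in VLV_IMR, so the two
+	 * registers must be updated together.
+	 */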
+ dpfl = I915_READ(VLV_DPFLIPSTAT);
+ imr = I915_READ(VLV_IMR);
+ if (pipe == 0) {
+ dpfl |= PIPEA_VBLANK_INT_EN;
+ imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
+ } else {
+		dpfl |= PIPEB_VBLANK_INT_EN;
+ imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+ }
+ I915_WRITE(VLV_DPFLIPSTAT, dpfl);
+ I915_WRITE(VLV_IMR, imr);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
@@ -1592,8 +1450,7 @@ static void i915_disable_vblank(struct drm_device *dev, int pipe)
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (dev_priv->info->gen == 3)
- I915_WRITE(INSTPM,
- INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS);
+ I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
i915_disable_pipestat(dev_priv, pipe,
PIPE_VBLANK_INTERRUPT_ENABLE |
@@ -1618,63 +1475,30 @@ static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
- DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB);
+ ironlake_disable_display_irq(dev_priv,
+ DE_PIPEA_VBLANK_IVB << (pipe * 5));
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-/* Set the vblank monitor pipe
- */
-int i915_vblank_pipe_set(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-int i915_vblank_pipe_get(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_vblank_pipe_t *pipe = data;
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
+ u32 dpfl, imr;
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ dpfl = I915_READ(VLV_DPFLIPSTAT);
+ imr = I915_READ(VLV_IMR);
+ if (pipe == 0) {
+ dpfl &= ~PIPEA_VBLANK_INT_EN;
+ imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
+ } else {
+ dpfl &= ~PIPEB_VBLANK_INT_EN;
+ imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
}
-
- pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
-
- return 0;
-}
-
-/**
- * Schedule buffer swap at given vertical blank.
- */
-int i915_vblank_swap(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- /* The delayed swap mechanism was fundamentally racy, and has been
- * removed. The model was that the client requested a delayed flip/swap
- * from the kernel, then waited for vblank before continuing to perform
- * rendering. The problem was that the kernel might wake the client
- * up before it dispatched the vblank swap (since the lock has to be
- * held while touching the ringbuffer), in which case the client would
- * clear and start the next frame before the swap occurred, and
- * flicker would occur in addition to likely missing the vblank.
- *
- * In the absence of this ioctl, userland falls back to a correct path
- * of waiting for a vblank, then dispatching the swap on its own.
- * Context switching to userland and back is plenty fast enough for
- * meeting the requirements of vblank swapping.
- */
- return -EINVAL;
+ I915_WRITE(VLV_IMR, imr);
+ I915_WRITE(VLV_DPFLIPSTAT, dpfl);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static u32
@@ -1689,11 +1513,9 @@ static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
if (list_empty(&ring->request_list) ||
i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
/* Issue a wake-up to catch stuck h/w. */
- if (ring->waiting_seqno && waitqueue_active(&ring->irq_queue)) {
- DRM_ERROR("Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n",
- ring->name,
- ring->waiting_seqno,
- ring->get_seqno(ring));
+ if (waitqueue_active(&ring->irq_queue)) {
+ DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
+ ring->name);
wake_up_all(&ring->irq_queue);
*err = true;
}
@@ -1716,6 +1538,35 @@ static bool kick_ring(struct intel_ring_buffer *ring)
return false;
}
+static bool i915_hangcheck_hung(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (dev_priv->hangcheck_count++ > 1) {
+ bool hung = true;
+
+ DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
+ i915_handle_error(dev, true);
+
+ if (!IS_GEN2(dev)) {
+ struct intel_ring_buffer *ring;
+ int i;
+
+ /* Is the chip hanging on a WAIT_FOR_EVENT?
+ * If so we can simply poke the RB_WAIT bit
+ * and break the hang. This should work on
+ * all but the second generation chipsets.
+ */
+ for_each_ring(ring, dev_priv, i)
+ hung &= !kick_ring(ring);
+ }
+
+ return hung;
+ }
+
+ return false;
+}
+
/**
* This is called when the chip hasn't reported back with completed
* batchbuffers in a long time. The first time this is called we simply record
@@ -1726,19 +1577,31 @@ void i915_hangcheck_elapsed(unsigned long data)
{
struct drm_device *dev = (struct drm_device *)data;
drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t acthd, instdone, instdone1, acthd_bsd, acthd_blt;
- bool err = false;
+ uint32_t acthd[I915_NUM_RINGS], instdone, instdone1;
+ struct intel_ring_buffer *ring;
+ bool err = false, idle;
+ int i;
if (!i915_enable_hangcheck)
return;
+ memset(acthd, 0, sizeof(acthd));
+ idle = true;
+ for_each_ring(ring, dev_priv, i) {
+ idle &= i915_hangcheck_ring_idle(ring, &err);
+ acthd[i] = intel_ring_get_active_head(ring);
+ }
+
/* If all work is done then ACTHD clearly hasn't advanced. */
- if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) &&
- i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
- i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) {
- dev_priv->hangcheck_count = 0;
- if (err)
+ if (idle) {
+ if (err) {
+ if (i915_hangcheck_hung(dev))
+ return;
+
goto repeat;
+ }
+
+ dev_priv->hangcheck_count = 0;
return;
}
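The goto repeat path re-arms the hangcheck timer via the repeat label at the end of this function (outside the hunks shown here); in this kernel that amounts to roughly:

	repeat:
		/* Reset timer in case GPU hangs without another request being added */
		mod_timer(&dev_priv->hangcheck_timer,
			  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));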
@@ -1749,47 +1612,16 @@ void i915_hangcheck_elapsed(unsigned long data)
instdone = I915_READ(INSTDONE_I965);
instdone1 = I915_READ(INSTDONE1);
}
- acthd = intel_ring_get_active_head(&dev_priv->ring[RCS]);
- acthd_bsd = HAS_BSD(dev) ?
- intel_ring_get_active_head(&dev_priv->ring[VCS]) : 0;
- acthd_blt = HAS_BLT(dev) ?
- intel_ring_get_active_head(&dev_priv->ring[BCS]) : 0;
- if (dev_priv->last_acthd == acthd &&
- dev_priv->last_acthd_bsd == acthd_bsd &&
- dev_priv->last_acthd_blt == acthd_blt &&
+ if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
dev_priv->last_instdone == instdone &&
dev_priv->last_instdone1 == instdone1) {
- if (dev_priv->hangcheck_count++ > 1) {
- DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
- i915_handle_error(dev, true);
-
- if (!IS_GEN2(dev)) {
- /* Is the chip hanging on a WAIT_FOR_EVENT?
- * If so we can simply poke the RB_WAIT bit
- * and break the hang. This should work on
- * all but the second generation chipsets.
- */
- if (kick_ring(&dev_priv->ring[RCS]))
- goto repeat;
-
- if (HAS_BSD(dev) &&
- kick_ring(&dev_priv->ring[VCS]))
- goto repeat;
-
- if (HAS_BLT(dev) &&
- kick_ring(&dev_priv->ring[BCS]))
- goto repeat;
- }
-
+ if (i915_hangcheck_hung(dev))
return;
- }
} else {
dev_priv->hangcheck_count = 0;
- dev_priv->last_acthd = acthd;
- dev_priv->last_acthd_bsd = acthd_bsd;
- dev_priv->last_acthd_blt = acthd_blt;
+ memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
dev_priv->last_instdone = instdone;
dev_priv->last_instdone1 = instdone1;
}
@@ -1808,10 +1640,6 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
atomic_set(&dev_priv->irq_received, 0);
- INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
- INIT_WORK(&dev_priv->error_work, i915_error_work_func);
- if (IS_GEN6(dev) || IS_IVYBRIDGE(dev))
- INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
I915_WRITE(HWSTAM, 0xeffe);
@@ -1832,6 +1660,38 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
POSTING_READ(SDEIER);
}
+static void valleyview_irq_preinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ atomic_set(&dev_priv->irq_received, 0);
+
+ /* VLV magic */
+ I915_WRITE(VLV_IMR, 0);
+ I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
+ I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
+ I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
+
+ /* and GT */
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+ I915_WRITE(GTIMR, 0xffffffff);
+ I915_WRITE(GTIER, 0x0);
+ POSTING_READ(GTIER);
+
+ I915_WRITE(DPINVGTT, 0xff);
+
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0xffff);
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ I915_WRITE(VLV_IMR, 0xffffffff);
+ I915_WRITE(VLV_IER, 0x0);
+ POSTING_READ(VLV_IER);
+}
+
/*
* Enable digital hotplug on the PCH, and configure the DP short pulse
* duration to 2ms (which is the minimum in the Display Port spec)
@@ -1861,13 +1721,6 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
u32 render_irqs;
u32 hotplug_mask;
- DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
- if (HAS_BSD(dev))
- DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
- if (HAS_BLT(dev))
- DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);
-
- dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
dev_priv->irq_mask = ~display_mask;
	/* should always be able to generate an irq */
@@ -1884,8 +1737,8 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
if (IS_GEN6(dev))
render_irqs =
GT_USER_INTERRUPT |
- GT_GEN6_BSD_USER_INTERRUPT |
- GT_BLT_USER_INTERRUPT;
+ GEN6_BSD_USER_INTERRUPT |
+ GEN6_BLITTER_USER_INTERRUPT;
else
render_irqs =
GT_USER_INTERRUPT |
@@ -1930,26 +1783,24 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
/* enable kind of interrupts always enabled */
- u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
- DE_PCH_EVENT_IVB | DE_PLANEA_FLIP_DONE_IVB |
- DE_PLANEB_FLIP_DONE_IVB;
+ u32 display_mask =
+ DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
+ DE_PLANEC_FLIP_DONE_IVB |
+ DE_PLANEB_FLIP_DONE_IVB |
+ DE_PLANEA_FLIP_DONE_IVB;
u32 render_irqs;
u32 hotplug_mask;
- DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
- if (HAS_BSD(dev))
- DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
- if (HAS_BLT(dev))
- DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);
-
- dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
dev_priv->irq_mask = ~display_mask;
	/* should always be able to generate an irq */
I915_WRITE(DEIIR, I915_READ(DEIIR));
I915_WRITE(DEIMR, dev_priv->irq_mask);
- I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK_IVB |
- DE_PIPEB_VBLANK_IVB);
+ I915_WRITE(DEIER,
+ display_mask |
+ DE_PIPEC_VBLANK_IVB |
+ DE_PIPEB_VBLANK_IVB |
+ DE_PIPEA_VBLANK_IVB);
POSTING_READ(DEIER);
dev_priv->gt_irq_mask = ~0;
@@ -1957,8 +1808,8 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
- render_irqs = GT_USER_INTERRUPT | GT_GEN6_BSD_USER_INTERRUPT |
- GT_BLT_USER_INTERRUPT;
+ render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
+ GEN6_BLITTER_USER_INTERRUPT;
I915_WRITE(GTIER, render_irqs);
POSTING_READ(GTIER);
@@ -1978,15 +1829,496 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
return 0;
}
-static void i915_driver_irq_preinstall(struct drm_device * dev)
+static int valleyview_irq_postinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 render_irqs;
+ u32 enable_mask;
+ u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+ u16 msid;
+
+ enable_mask = I915_DISPLAY_PORT_INTERRUPT;
+ enable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
+ I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+ dev_priv->irq_mask = ~enable_mask;
+
+ dev_priv->pipestat[0] = 0;
+ dev_priv->pipestat[1] = 0;
+
+ /* Hack for broken MSIs on VLV */
+ pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
+ pci_read_config_word(dev->pdev, 0x98, &msid);
+ msid &= 0xff; /* mask out delivery bits */
+ msid |= (1<<14);
+ pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
+
+ I915_WRITE(VLV_IMR, dev_priv->irq_mask);
+ I915_WRITE(VLV_IER, enable_mask);
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ I915_WRITE(PIPESTAT(0), 0xffff);
+ I915_WRITE(PIPESTAT(1), 0xffff);
+ POSTING_READ(VLV_IER);
+
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ I915_WRITE(VLV_IIR, 0xffffffff);
+
+ render_irqs = GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT |
+ GT_GEN6_BLT_CS_ERROR_INTERRUPT |
+ GT_GEN6_BLT_USER_INTERRUPT |
+ GT_GEN6_BSD_USER_INTERRUPT |
+ GT_GEN6_BSD_CS_ERROR_INTERRUPT |
+ GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
+ GT_PIPE_NOTIFY |
+ GT_RENDER_CS_ERROR_INTERRUPT |
+ GT_SYNC_STATUS |
+ GT_USER_INTERRUPT;
+
+ dev_priv->gt_irq_mask = ~render_irqs;
+
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+ I915_WRITE(GTIMR, 0);
+ I915_WRITE(GTIER, render_irqs);
+ POSTING_READ(GTIER);
+
+ /* ack & enable invalid PTE error interrupts */
+#if 0 /* FIXME: add support to irq handler for checking these bits */
+ I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
+ I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
+#endif
+
+ I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
+#if 0 /* FIXME: check register definitions; some have moved */
+ /* Note HDMI and DP share bits */
+ if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMID_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
+ hotplug_en |= SDVOC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
+ hotplug_en |= SDVOB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
+ hotplug_en |= CRT_HOTPLUG_INT_EN;
+ hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+ }
+#endif
+
+ I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+
+ return 0;
+}
+
+static void valleyview_irq_uninstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ if (!dev_priv)
+ return;
+
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0xffff);
+
+ I915_WRITE(HWSTAM, 0xffffffff);
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0xffff);
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ I915_WRITE(VLV_IMR, 0xffffffff);
+ I915_WRITE(VLV_IER, 0x0);
+ POSTING_READ(VLV_IER);
+}
+
+static void ironlake_irq_uninstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ if (!dev_priv)
+ return;
+
+ I915_WRITE(HWSTAM, 0xffffffff);
+
+ I915_WRITE(DEIMR, 0xffffffff);
+ I915_WRITE(DEIER, 0x0);
+ I915_WRITE(DEIIR, I915_READ(DEIIR));
+
+ I915_WRITE(GTIMR, 0xffffffff);
+ I915_WRITE(GTIER, 0x0);
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+
+ I915_WRITE(SDEIMR, 0xffffffff);
+ I915_WRITE(SDEIER, 0x0);
+ I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+}
+
+static void i8xx_irq_preinstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
atomic_set(&dev_priv->irq_received, 0);
- INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
- INIT_WORK(&dev_priv->error_work, i915_error_work_func);
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0);
+ I915_WRITE16(IMR, 0xffff);
+ I915_WRITE16(IER, 0x0);
+ POSTING_READ16(IER);
+}
+
+static int i8xx_irq_postinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ dev_priv->pipestat[0] = 0;
+ dev_priv->pipestat[1] = 0;
+
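+	/* Three masking levels are in play here: EMR selects which error
+	 * sources may set the error bit in IIR, IMR masks IIR bits from
+	 * asserting the interrupt line, and IER enables interrupt
+	 * generation itself.
+	 */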
+ I915_WRITE16(EMR,
+ ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
+
+ /* Unmask the interrupts that we always want on. */
+ dev_priv->irq_mask =
+ ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+ I915_WRITE16(IMR, dev_priv->irq_mask);
+
+ I915_WRITE16(IER,
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
+ I915_USER_INTERRUPT);
+ POSTING_READ16(IER);
+
+ return 0;
+}
+
+static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
+{
+ struct drm_device *dev = (struct drm_device *) arg;
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u16 iir, new_iir;
+ u32 pipe_stats[2];
+ unsigned long irqflags;
+ int pipe;
+ u16 flip_mask =
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+
+ atomic_inc(&dev_priv->irq_received);
+
+ iir = I915_READ16(IIR);
+ if (iir == 0)
+ return IRQ_NONE;
+
+ while (iir & ~flip_mask) {
+ /* Can't rely on pipestat interrupt bit in iir as it might
+ * have been cleared after the pipestat interrupt was received.
+ * It doesn't set the bit in iir again, but it still produces
+ * interrupts (for non-MSI).
+ */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+ i915_handle_error(dev, false);
+
+ for_each_pipe(pipe) {
+ int reg = PIPESTAT(pipe);
+ pipe_stats[pipe] = I915_READ(reg);
+
+ /*
+ * Clear the PIPE*STAT regs before the IIR
+ */
+ if (pipe_stats[pipe] & 0x8000ffff) {
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ DRM_DEBUG_DRIVER("pipe %c underrun\n",
+ pipe_name(pipe));
+ I915_WRITE(reg, pipe_stats[pipe]);
+ }
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
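+		/* Leave the flip-pending bits unacknowledged in IIR until the
+		 * matching vblank arrives; only then is the bit dropped from
+		 * flip_mask below and acked on the next pass.
+		 */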
+ I915_WRITE16(IIR, iir & ~flip_mask);
+ new_iir = I915_READ16(IIR); /* Flush posted writes */
+
+ i915_update_dri1_breadcrumb(dev);
+
+ if (iir & I915_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[RCS]);
+
+ if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
+ drm_handle_vblank(dev, 0)) {
+ if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
+ intel_prepare_page_flip(dev, 0);
+ intel_finish_page_flip(dev, 0);
+ flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
+ }
+ }
+
+ if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
+ drm_handle_vblank(dev, 1)) {
+ if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
+ intel_prepare_page_flip(dev, 1);
+ intel_finish_page_flip(dev, 1);
+ flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+ }
+ }
+
+ iir = new_iir;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void i8xx_irq_uninstall(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ for_each_pipe(pipe) {
+ /* Clear enable bits; then clear status bits */
+ I915_WRITE(PIPESTAT(pipe), 0);
+ I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
+ }
+ I915_WRITE16(IMR, 0xffff);
+ I915_WRITE16(IER, 0x0);
+ I915_WRITE16(IIR, I915_READ16(IIR));
+}
+
+static void i915_irq_preinstall(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ atomic_set(&dev_priv->irq_received, 0);
+
+ if (I915_HAS_HOTPLUG(dev)) {
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ }
+
+ I915_WRITE16(HWSTAM, 0xeffe);
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0);
+ I915_WRITE(IMR, 0xffffffff);
+ I915_WRITE(IER, 0x0);
+ POSTING_READ(IER);
+}
+
+static int i915_irq_postinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 enable_mask;
+
+ dev_priv->pipestat[0] = 0;
+ dev_priv->pipestat[1] = 0;
+
+ I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
+
+ /* Unmask the interrupts that we always want on. */
+ dev_priv->irq_mask =
+ ~(I915_ASLE_INTERRUPT |
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+
+ enable_mask =
+ I915_ASLE_INTERRUPT |
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
+ I915_USER_INTERRUPT;
+
+ if (I915_HAS_HOTPLUG(dev)) {
+ /* Enable in IER... */
+ enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
+ /* and unmask in IMR */
+ dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
+ }
+
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ I915_WRITE(IER, enable_mask);
+ POSTING_READ(IER);
+
+ if (I915_HAS_HOTPLUG(dev)) {
+ u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+
+ if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMID_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
+ hotplug_en |= SDVOC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
+ hotplug_en |= SDVOB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
+ hotplug_en |= CRT_HOTPLUG_INT_EN;
+ hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+ }
+
+ /* Ignore TV since it's buggy */
+
+ I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+ }
+
+ intel_opregion_enable_asle(dev);
+
+ return 0;
+}
+
+static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
+{
+ struct drm_device *dev = (struct drm_device *) arg;
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
+ unsigned long irqflags;
+ u32 flip_mask =
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+ u32 flip[2] = {
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
+ };
+ int pipe, ret = IRQ_NONE;
+
+ atomic_inc(&dev_priv->irq_received);
+
+ iir = I915_READ(IIR);
+ do {
+ bool irq_received = (iir & ~flip_mask) != 0;
+ bool blc_event = false;
+
+ /* Can't rely on pipestat interrupt bit in iir as it might
+ * have been cleared after the pipestat interrupt was received.
+ * It doesn't set the bit in iir again, but it still produces
+ * interrupts (for non-MSI).
+ */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+ i915_handle_error(dev, false);
+
+ for_each_pipe(pipe) {
+ int reg = PIPESTAT(pipe);
+ pipe_stats[pipe] = I915_READ(reg);
+
+ /* Clear the PIPE*STAT regs before the IIR */
+ if (pipe_stats[pipe] & 0x8000ffff) {
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ DRM_DEBUG_DRIVER("pipe %c underrun\n",
+ pipe_name(pipe));
+ I915_WRITE(reg, pipe_stats[pipe]);
+ irq_received = true;
+ }
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ if (!irq_received)
+ break;
+
+ /* Consume port. Then clear IIR or we'll miss events */
+ if ((I915_HAS_HOTPLUG(dev)) &&
+ (iir & I915_DISPLAY_PORT_INTERRUPT)) {
+ u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+
+ DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
+ hotplug_status);
+ if (hotplug_status & dev_priv->hotplug_supported_mask)
+ queue_work(dev_priv->wq,
+ &dev_priv->hotplug_work);
+
+ I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+ POSTING_READ(PORT_HOTPLUG_STAT);
+ }
+
+ I915_WRITE(IIR, iir & ~flip_mask);
+ new_iir = I915_READ(IIR); /* Flush posted writes */
+
+ if (iir & I915_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[RCS]);
+
+ for_each_pipe(pipe) {
+ int plane = pipe;
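+			/* On mobile parts the primary planes are wired to the
+			 * opposite pipes, so the flip-pending bit for this
+			 * pipe lives on the other plane.
+			 */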
+ if (IS_MOBILE(dev))
+ plane = !plane;
+ if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
+ drm_handle_vblank(dev, pipe)) {
+ if (iir & flip[plane]) {
+ intel_prepare_page_flip(dev, plane);
+ intel_finish_page_flip(dev, pipe);
+ flip_mask &= ~flip[plane];
+ }
+ }
+
+ if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
+ blc_event = true;
+ }
+
+ if (blc_event || (iir & I915_ASLE_INTERRUPT))
+ intel_opregion_asle_intr(dev);
+
+ /* With MSI, interrupts are only generated when iir
+ * transitions from zero to nonzero. If another bit got
+ * set while we were handling the existing iir bits, then
+ * we would never get another interrupt.
+ *
+ * This is fine on non-MSI as well, as if we hit this path
+ * we avoid exiting the interrupt handler only to generate
+ * another one.
+ *
+ * Note that for MSI this could cause a stray interrupt report
+ * if an interrupt landed in the time between writing IIR and
+ * the posting read. This should be rare enough to never
+ * trigger the 99% of 100,000 interrupts test for disabling
+ * stray interrupts.
+ */
+ ret = IRQ_HANDLED;
+ iir = new_iir;
+ } while (iir & ~flip_mask);
+
+ i915_update_dri1_breadcrumb(dev);
+
+ return ret;
+}
+
+static void i915_irq_uninstall(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ if (I915_HAS_HOTPLUG(dev)) {
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ }
+
+ I915_WRITE16(HWSTAM, 0xffff);
+ for_each_pipe(pipe) {
+ /* Clear enable bits; then clear status bits */
+ I915_WRITE(PIPESTAT(pipe), 0);
+ I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
+ }
+ I915_WRITE(IMR, 0xffffffff);
+ I915_WRITE(IER, 0x0);
+
+ I915_WRITE(IIR, I915_READ(IIR));
+}
+
+static void i965_irq_preinstall(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ atomic_set(&dev_priv->irq_received, 0);
if (I915_HAS_HOTPLUG(dev)) {
I915_WRITE(PORT_HOTPLUG_EN, 0);
@@ -2001,20 +2333,25 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
POSTING_READ(IER);
}
-/*
- * Must be called after intel_modeset_init or hotplug interrupts won't be
- * enabled correctly.
- */
-static int i915_driver_irq_postinstall(struct drm_device *dev)
+static int i965_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
+ u32 enable_mask;
u32 error_mask;
- dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
-
/* Unmask the interrupts that we always want on. */
- dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX;
+ dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+
+ enable_mask = ~dev_priv->irq_mask;
+ enable_mask |= I915_USER_INTERRUPT;
+
+ if (IS_G4X(dev))
+ enable_mask |= I915_BSD_USER_INTERRUPT;
dev_priv->pipestat[0] = 0;
dev_priv->pipestat[1] = 0;
@@ -2081,31 +2418,124 @@ static int i915_driver_irq_postinstall(struct drm_device *dev)
return 0;
}
-static void ironlake_irq_uninstall(struct drm_device *dev)
+static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
{
+ struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 iir, new_iir;
+ u32 pipe_stats[I915_MAX_PIPES];
+ unsigned long irqflags;
+ int irq_received;
+ int ret = IRQ_NONE, pipe;
- if (!dev_priv)
- return;
+ atomic_inc(&dev_priv->irq_received);
- dev_priv->vblank_pipe = 0;
+ iir = I915_READ(IIR);
- I915_WRITE(HWSTAM, 0xffffffff);
+ for (;;) {
+ bool blc_event = false;
- I915_WRITE(DEIMR, 0xffffffff);
- I915_WRITE(DEIER, 0x0);
- I915_WRITE(DEIIR, I915_READ(DEIIR));
+ irq_received = iir != 0;
- I915_WRITE(GTIMR, 0xffffffff);
- I915_WRITE(GTIER, 0x0);
- I915_WRITE(GTIIR, I915_READ(GTIIR));
+ /* Can't rely on pipestat interrupt bit in iir as it might
+ * have been cleared after the pipestat interrupt was received.
+ * It doesn't set the bit in iir again, but it still produces
+ * interrupts (for non-MSI).
+ */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+ i915_handle_error(dev, false);
- I915_WRITE(SDEIMR, 0xffffffff);
- I915_WRITE(SDEIER, 0x0);
- I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+ for_each_pipe(pipe) {
+ int reg = PIPESTAT(pipe);
+ pipe_stats[pipe] = I915_READ(reg);
+
+ /*
+ * Clear the PIPE*STAT regs before the IIR
+ */
+ if (pipe_stats[pipe] & 0x8000ffff) {
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ DRM_DEBUG_DRIVER("pipe %c underrun\n",
+ pipe_name(pipe));
+ I915_WRITE(reg, pipe_stats[pipe]);
+ irq_received = 1;
+ }
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ if (!irq_received)
+ break;
+
+ ret = IRQ_HANDLED;
+
+ /* Consume port. Then clear IIR or we'll miss events */
+ if ((I915_HAS_HOTPLUG(dev)) &&
+ (iir & I915_DISPLAY_PORT_INTERRUPT)) {
+ u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+
+ DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
+ hotplug_status);
+ if (hotplug_status & dev_priv->hotplug_supported_mask)
+ queue_work(dev_priv->wq,
+ &dev_priv->hotplug_work);
+
+ I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+ I915_READ(PORT_HOTPLUG_STAT);
+ }
+
+ I915_WRITE(IIR, iir);
+ new_iir = I915_READ(IIR); /* Flush posted writes */
+
+ if (iir & I915_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[RCS]);
+ if (iir & I915_BSD_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[VCS]);
+
+ if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
+ intel_prepare_page_flip(dev, 0);
+
+ if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
+ intel_prepare_page_flip(dev, 1);
+
+ for_each_pipe(pipe) {
+ if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
+ drm_handle_vblank(dev, pipe)) {
+ i915_pageflip_stall_check(dev, pipe);
+ intel_finish_page_flip(dev, pipe);
+ }
+
+ if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
+ blc_event = true;
+ }
+
+
+ if (blc_event || (iir & I915_ASLE_INTERRUPT))
+ intel_opregion_asle_intr(dev);
+
+ /* With MSI, interrupts are only generated when iir
+ * transitions from zero to nonzero. If another bit got
+ * set while we were handling the existing iir bits, then
+ * we would never get another interrupt.
+ *
+ * This is fine on non-MSI as well, as if we hit this path
+ * we avoid exiting the interrupt handler only to generate
+ * another one.
+ *
+ * Note that for MSI this could cause a stray interrupt report
+ * if an interrupt landed in the time between writing IIR and
+ * the posting read. This should be rare enough to never
+ * trigger the 99% of 100,000 interrupts test for disabling
+ * stray interrupts.
+ */
+ iir = new_iir;
+ }
+
+ i915_update_dri1_breadcrumb(dev);
+
+ return ret;
}
-static void i915_driver_irq_uninstall(struct drm_device * dev)
+static void i965_irq_uninstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
@@ -2113,8 +2543,6 @@ static void i915_driver_irq_uninstall(struct drm_device * dev)
if (!dev_priv)
return;
- dev_priv->vblank_pipe = 0;
-
if (I915_HAS_HOTPLUG(dev)) {
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -2134,9 +2562,15 @@ static void i915_driver_irq_uninstall(struct drm_device * dev)
void intel_irq_init(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
+ INIT_WORK(&dev_priv->error_work, i915_error_work_func);
+ INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
+
dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
- if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
+ if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
dev->driver->get_vblank_counter = gm45_get_vblank_counter;
}
@@ -2147,7 +2581,14 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->get_vblank_timestamp = NULL;
dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
- if (IS_IVYBRIDGE(dev)) {
+ if (IS_VALLEYVIEW(dev)) {
+ dev->driver->irq_handler = valleyview_irq_handler;
+ dev->driver->irq_preinstall = valleyview_irq_preinstall;
+ dev->driver->irq_postinstall = valleyview_irq_postinstall;
+ dev->driver->irq_uninstall = valleyview_irq_uninstall;
+ dev->driver->enable_vblank = valleyview_enable_vblank;
+ dev->driver->disable_vblank = valleyview_disable_vblank;
+ } else if (IS_IVYBRIDGE(dev)) {
/* Share pre & uninstall handlers with ILK/SNB */
dev->driver->irq_handler = ivybridge_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
@@ -2155,6 +2596,14 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->irq_uninstall = ironlake_irq_uninstall;
dev->driver->enable_vblank = ivybridge_enable_vblank;
dev->driver->disable_vblank = ivybridge_disable_vblank;
+ } else if (IS_HASWELL(dev)) {
+		/* Share interrupt handling with IVB */
+ dev->driver->irq_handler = ivybridge_irq_handler;
+ dev->driver->irq_preinstall = ironlake_irq_preinstall;
+ dev->driver->irq_postinstall = ivybridge_irq_postinstall;
+ dev->driver->irq_uninstall = ironlake_irq_uninstall;
+ dev->driver->enable_vblank = ivybridge_enable_vblank;
+ dev->driver->disable_vblank = ivybridge_disable_vblank;
} else if (HAS_PCH_SPLIT(dev)) {
dev->driver->irq_handler = ironlake_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
@@ -2163,10 +2612,25 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->enable_vblank = ironlake_enable_vblank;
dev->driver->disable_vblank = ironlake_disable_vblank;
} else {
- dev->driver->irq_preinstall = i915_driver_irq_preinstall;
- dev->driver->irq_postinstall = i915_driver_irq_postinstall;
- dev->driver->irq_uninstall = i915_driver_irq_uninstall;
- dev->driver->irq_handler = i915_driver_irq_handler;
+ if (INTEL_INFO(dev)->gen == 2) {
+ dev->driver->irq_preinstall = i8xx_irq_preinstall;
+ dev->driver->irq_postinstall = i8xx_irq_postinstall;
+ dev->driver->irq_handler = i8xx_irq_handler;
+ dev->driver->irq_uninstall = i8xx_irq_uninstall;
+ } else if (INTEL_INFO(dev)->gen == 3) {
+ /* IIR "flip pending" means done if this bit is set */
+ I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
+
+ dev->driver->irq_preinstall = i915_irq_preinstall;
+ dev->driver->irq_postinstall = i915_irq_postinstall;
+ dev->driver->irq_uninstall = i915_irq_uninstall;
+ dev->driver->irq_handler = i915_irq_handler;
+ } else {
+ dev->driver->irq_preinstall = i965_irq_preinstall;
+ dev->driver->irq_postinstall = i965_irq_postinstall;
+ dev->driver->irq_uninstall = i965_irq_uninstall;
+ dev->driver->irq_handler = i965_irq_handler;
+ }
dev->driver->enable_vblank = i915_enable_vblank;
dev->driver->disable_vblank = i915_disable_vblank;
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 9d24d65f0c3..2d49b9507ed 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -27,6 +27,11 @@
#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
+
+#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
+#define _MASKED_BIT_DISABLE(a) ((a) << 16)
+
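+/* Many registers on this hardware carry a write-enable mask in their high
+ * 16 bits: a low bit is only latched when the matching high bit is set in
+ * the same write, which turns a read-modify-write into a single store.
+ * Worked example for a hypothetical bit 6:
+ *   _MASKED_BIT_ENABLE(1 << 6)  == 0x00400040  (unlock bit 6 and set it)
+ *   _MASKED_BIT_DISABLE(1 << 6) == 0x00400000  (unlock bit 6, leave it 0)
+ * Likewise _PIPE/_PORT interpolate between the A/B instances of a register:
+ * _PIPE(1, _PIPEASTAT, _PIPEBSTAT) expands to
+ * _PIPEASTAT + 1*(_PIPEBSTAT - _PIPEASTAT), i.e. _PIPEBSTAT.
+ */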
/*
* The Bridge device's PCI config space has information about the
* fb aperture size and the amount of pre-reserved memory.
@@ -77,6 +82,7 @@
#define GRDOM_FULL (0<<2)
#define GRDOM_RENDER (1<<2)
#define GRDOM_MEDIA (3<<2)
+#define GRDOM_RESET_ENABLE (1<<0)
#define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */
#define GEN6_MBC_SNPCR_SHIFT 21
@@ -125,6 +131,13 @@
#define ECOCHK_PPGTT_CACHE64B (0x3<<3)
#define ECOCHK_PPGTT_CACHE4B (0x0<<3)
+#define GAC_ECO_BITS 0x14090
+#define ECOBITS_PPGTT_CACHE64B (3<<8)
+#define ECOBITS_PPGTT_CACHE4B (0<<8)
+
+#define GAB_CTL 0x24000
+#define GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8)
+
/* VGA stuff */
#define VGA_ST01_MDA 0x3ba
@@ -222,6 +235,7 @@
#define MI_BATCH_NON_SECURE (1)
#define MI_BATCH_NON_SECURE_I965 (1<<8)
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
+#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
#define MI_SEMAPHORE_UPDATE (1<<21)
@@ -301,6 +315,61 @@
#define DEBUG_RESET_RENDER (1<<8)
#define DEBUG_RESET_DISPLAY (1<<9)
+/*
+ * DPIO - a special bus for various display related registers to hide behind:
+ * 0x800c: m1, m2, n, p1, p2, k dividers
+ * 0x8014: REF and SFR select
+ * 0x8014: N divider, VCO select
+ * 0x801c/3c: core clock bits
+ * 0x8048/68: low pass filter coefficients
+ * 0x8100: fast clock controls
+ */
+#define DPIO_PKT 0x2100
+#define DPIO_RID (0<<24)
+#define DPIO_OP_WRITE (1<<16)
+#define DPIO_OP_READ (0<<16)
+#define DPIO_PORTID (0x12<<8)
+#define DPIO_BYTE (0xf<<4)
+#define DPIO_BUSY (1<<0) /* status only */
+#define DPIO_DATA 0x2104
+#define DPIO_REG 0x2108
+#define DPIO_CTL 0x2110
+#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
+#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */
+#define DPIO_SFR_BYPASS (1<<1)
+#define DPIO_RESET (1<<0)
+
+#define _DPIO_DIV_A 0x800c
+#define DPIO_POST_DIV_SHIFT (28) /* 3 bits */
+#define DPIO_K_SHIFT (24) /* 4 bits */
+#define DPIO_P1_SHIFT (21) /* 3 bits */
+#define DPIO_P2_SHIFT (16) /* 5 bits */
+#define DPIO_N_SHIFT (12) /* 4 bits */
+#define DPIO_ENABLE_CALIBRATION (1<<11)
+#define DPIO_M1DIV_SHIFT (8) /* 3 bits */
+#define DPIO_M2DIV_MASK 0xff
+#define _DPIO_DIV_B 0x802c
+#define DPIO_DIV(pipe) _PIPE(pipe, _DPIO_DIV_A, _DPIO_DIV_B)
+
+#define _DPIO_REFSFR_A 0x8014
+#define DPIO_REFSEL_OVERRIDE 27
+#define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */
+#define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */
+#define DPIO_PLL_REFCLK_SEL_SHIFT 16 /* 2 bits */
+#define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */
+#define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */
+#define _DPIO_REFSFR_B 0x8034
+#define DPIO_REFSFR(pipe) _PIPE(pipe, _DPIO_REFSFR_A, _DPIO_REFSFR_B)
+
+#define _DPIO_CORE_CLK_A 0x801c
+#define _DPIO_CORE_CLK_B 0x803c
+#define DPIO_CORE_CLK(pipe) _PIPE(pipe, _DPIO_CORE_CLK_A, _DPIO_CORE_CLK_B)
+
+#define _DPIO_LFP_COEFF_A 0x8048
+#define _DPIO_LFP_COEFF_B 0x8068
+#define DPIO_LFP_COEFF(pipe) _PIPE(pipe, _DPIO_LFP_COEFF_A, _DPIO_LFP_COEFF_B)
+
+#define DPIO_FASTCLK_DISABLE 0x8100
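+/* The DPIO registers above form a simple mailbox: write the target offset
+ * to DPIO_REG, kick a command into DPIO_PKT, poll DPIO_BUSY until the
+ * transaction completes, then fetch the result from DPIO_DATA. A minimal
+ * sketch of a read helper (the real one is expected to live in the display
+ * code, under a dedicated dpio_lock), returning 0 on a stuck or timed-out
+ * bus:
+ *
+ *	static u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
+ *	{
+ *		if (wait_for((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100))
+ *			return 0;
+ *		I915_WRITE(DPIO_REG, reg);
+ *		I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ |
+ *			   DPIO_PORTID | DPIO_BYTE);
+ *		if (wait_for((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100))
+ *			return 0;
+ *		return I915_READ(DPIO_DATA);
+ *	}
+ */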
/*
* Fence registers
@@ -360,8 +429,6 @@
#define ARB_MODE 0x04030
#define ARB_MODE_SWIZZLE_SNB (1<<4)
#define ARB_MODE_SWIZZLE_IVB (1<<5)
-#define ARB_MODE_ENABLE(x) GFX_MODE_ENABLE(x)
-#define ARB_MODE_DISABLE(x) GFX_MODE_DISABLE(x)
#define RENDER_HWS_PGA_GEN7 (0x04080)
#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id)
#define DONE_REG 0x40b0
@@ -417,6 +484,7 @@
#define INSTDONE 0x02090
#define NOPID 0x02094
#define HWSTAM 0x02098
+#define DMA_FADD_I8XX 0x020d0
#define ERROR_GEN6 0x040a0
@@ -432,6 +500,7 @@
*/
# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
#define _3D_CHICKEN3 0x02090
+#define _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL (1 << 5)
#define MI_MODE 0x0209c
# define VS_TIMER_DISPATCH (1 << 6)
@@ -447,14 +516,16 @@
#define GFX_PSMI_GRANULARITY (1<<10)
#define GFX_PPGTT_ENABLE (1<<9)
-#define GFX_MODE_ENABLE(bit) (((bit) << 16) | (bit))
-#define GFX_MODE_DISABLE(bit) (((bit) << 16) | (0))
-
#define SCPD0 0x0209c /* 915+ only */
#define IER 0x020a0
#define IIR 0x020a4
#define IMR 0x020a8
#define ISR 0x020ac
+#define VLV_IIR_RW 0x182084
+#define VLV_IER 0x1820a0
+#define VLV_IIR 0x1820a4
+#define VLV_IMR 0x1820a8
+#define VLV_ISR 0x1820ac
#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
@@ -500,7 +571,6 @@
#define LM_BURST_LENGTH 0x00000700
#define LM_FIFO_WATERMARK 0x0000001F
#define MI_ARB_STATE 0x020e4 /* 915+ only */
-#define MI_ARB_MASK_SHIFT 16 /* shift for enable bits */
/* Make render/texture TLB fetches lower priority than associated data
* fetches. This is not turned on by default
@@ -565,7 +635,6 @@
#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */
#define CACHE_MODE_0 0x02120 /* 915+ only */
-#define CM0_MASK_SHIFT 16
#define CM0_IZ_OPT_DISABLE (1<<6)
#define CM0_ZR_OPT_DISABLE (1<<5)
#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5)
@@ -579,7 +648,12 @@
#define ECO_GATING_CX_ONLY (1<<3)
#define ECO_FLIP_DONE (1<<0)
-/* GEN6 interrupt control */
+#define CACHE_MODE_1 0x7004 /* IVB+ */
+#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
+
+/* GEN6 interrupt control
+ * Note that the per-ring interrupt bits do alias with the global interrupt bits
+ * in GTIMR. */
#define GEN6_RENDER_HWSTAM 0x2098
#define GEN6_RENDER_IMR 0x20a8
#define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8)
@@ -615,6 +689,21 @@
#define GEN6_BSD_RNCID 0x12198
+#define GEN7_FF_THREAD_MODE 0x20a0
+#define GEN7_FF_SCHED_MASK 0x0077070
+#define GEN7_FF_TS_SCHED_HS1 (0x5<<16)
+#define GEN7_FF_TS_SCHED_HS0 (0x3<<16)
+#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1<<16)
+#define GEN7_FF_TS_SCHED_HW (0x0<<16) /* Default */
+#define GEN7_FF_VS_SCHED_HS1 (0x5<<12)
+#define GEN7_FF_VS_SCHED_HS0 (0x3<<12)
+#define GEN7_FF_VS_SCHED_LOAD_BALANCE (0x1<<12) /* Default */
+#define GEN7_FF_VS_SCHED_HW (0x0<<12)
+#define GEN7_FF_DS_SCHED_HS1 (0x5<<4)
+#define GEN7_FF_DS_SCHED_HS0 (0x3<<4)
+#define GEN7_FF_DS_SCHED_LOAD_BALANCE (0x1<<4) /* Default */
+#define GEN7_FF_DS_SCHED_HW (0x0<<4)
+
/*
* Framebuffer compression (915+ only)
*/
@@ -743,9 +832,9 @@
#define GMBUS_PORT_PANEL 3
#define GMBUS_PORT_DPC 4 /* HDMIC */
#define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */
- /* 6 reserved */
-#define GMBUS_PORT_DPD 7 /* HDMID */
-#define GMBUS_NUM_PORTS 8
+#define GMBUS_PORT_DPD 6 /* HDMID */
+#define GMBUS_PORT_RESERVED 7 /* 7 reserved */
+#define GMBUS_NUM_PORTS (GMBUS_PORT_DPD - GMBUS_PORT_SSC + 1)
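+/* GMBUS_PORT_SSC is 1 (defined above this hunk), so GMBUS_NUM_PORTS counts
+ * the six pin pairs from SSC through DPD. */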
#define GMBUS1 0x5104 /* command/status */
#define GMBUS_SW_CLR_INT (1<<31)
#define GMBUS_SW_RDY (1<<30)
@@ -797,7 +886,9 @@
#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
#define DPLL_VCO_ENABLE (1 << 31)
#define DPLL_DVO_HIGH_SPEED (1 << 30)
+#define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30)
#define DPLL_SYNCLOCK_ENABLE (1 << 29)
+#define DPLL_REFA_CLK_ENABLE_VLV (1 << 29)
#define DPLL_VGA_MODE_DIS (1 << 28)
#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
#define DPLLB_MODE_LVDS (2 << 26) /* i915 */
@@ -809,6 +900,7 @@
#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
+#define DPLL_INTEGRATED_CLOCK_VLV (1<<13)
#define SRX_INDEX 0x3c4
#define SRX_DATA 0x3c5
@@ -904,6 +996,7 @@
#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
#define _DPLL_B_MD 0x06020 /* 965+ only */
#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD)
+
#define _FPA0 0x06040
#define _FPA1 0x06044
#define _FPB0 0x06048
@@ -1044,6 +1137,9 @@
#define RAMCLK_GATE_D 0x6210 /* CRL only */
#define DEUC 0x6214 /* CRL only */
+#define FW_BLC_SELF_VLV 0x6500
+#define FW_CSPWRDWNEN (1<<15)
+
/*
* Palette regs
*/
@@ -1601,9 +1697,12 @@
/* Video Data Island Packet control */
#define VIDEO_DIP_DATA 0x61178
#define VIDEO_DIP_CTL 0x61170
+/* Pre HSW: */
#define VIDEO_DIP_ENABLE (1 << 31)
#define VIDEO_DIP_PORT_B (1 << 29)
#define VIDEO_DIP_PORT_C (2 << 29)
+#define VIDEO_DIP_PORT_D (3 << 29)
+#define VIDEO_DIP_PORT_MASK (3 << 29)
#define VIDEO_DIP_ENABLE_AVI (1 << 21)
#define VIDEO_DIP_ENABLE_VENDOR (2 << 21)
#define VIDEO_DIP_ENABLE_SPD (8 << 21)
@@ -1614,6 +1713,10 @@
#define VIDEO_DIP_FREQ_ONCE (0 << 16)
#define VIDEO_DIP_FREQ_VSYNC (1 << 16)
#define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
+#define VIDEO_DIP_FREQ_MASK (3 << 16)
+/* HSW and later: */
+#define VIDEO_DIP_ENABLE_AVI_HSW (1 << 12)
+#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0)
/* Panel power sequencing */
#define PP_STATUS 0x61200
@@ -2380,7 +2483,8 @@
/* Pipe A */
#define _PIPEADSL 0x70000
-#define DSL_LINEMASK 0x00000fff
+#define DSL_LINEMASK_GEN2 0x00000fff
+#define DSL_LINEMASK_GEN3 0x00001fff
#define _PIPEACONF 0x70008
#define PIPECONF_ENABLE (1<<31)
#define PIPECONF_DISABLE 0
@@ -2422,23 +2526,30 @@
#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
#define _PIPEASTAT 0x70024
#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
+#define SPRITE1_FLIPDONE_INT_EN_VLV (1UL<<30)
#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
#define PIPE_CRC_DONE_ENABLE (1UL<<28)
#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27)
+#define PLANE_FLIP_DONE_INT_EN_VLV (1UL<<26)
#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26)
#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25)
#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
#define PIPE_DPST_EVENT_ENABLE (1UL<<23)
+#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<22)
#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */
#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17)
+#define PIPEA_HBLANK_INT_EN_VLV (1UL<<16)
#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16)
+#define SPRITE1_FLIPDONE_INT_STATUS_VLV (1UL<<15)
+#define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<14)
#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11)
+#define PLANE_FLIPDONE_INT_STATUS_VLV (1UL<<10)
#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10)
#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9)
#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
@@ -2463,6 +2574,40 @@
#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
+#define VLV_DPFLIPSTAT 0x70028
+#define PIPEB_LINE_COMPARE_STATUS (1<<29)
+#define PIPEB_HLINE_INT_EN (1<<28)
+#define PIPEB_VBLANK_INT_EN (1<<27)
+#define SPRITED_FLIPDONE_INT_EN (1<<26)
+#define SPRITEC_FLIPDONE_INT_EN (1<<25)
+#define PLANEB_FLIPDONE_INT_EN (1<<24)
+#define PIPEA_LINE_COMPARE_STATUS (1<<21)
+#define PIPEA_HLINE_INT_EN (1<<20)
+#define PIPEA_VBLANK_INT_EN (1<<19)
+#define SPRITEB_FLIPDONE_INT_EN (1<<18)
+#define SPRITEA_FLIPDONE_INT_EN (1<<17)
+#define PLANEA_FLIPDONE_INT_EN (1<<16)
+
+#define DPINVGTT 0x7002c /* VLV only */
+#define CURSORB_INVALID_GTT_INT_EN (1<<23)
+#define CURSORA_INVALID_GTT_INT_EN (1<<22)
+#define SPRITED_INVALID_GTT_INT_EN (1<<21)
+#define SPRITEC_INVALID_GTT_INT_EN (1<<20)
+#define PLANEB_INVALID_GTT_INT_EN (1<<19)
+#define SPRITEB_INVALID_GTT_INT_EN (1<<18)
+#define SPRITEA_INVALID_GTT_INT_EN (1<<17)
+#define PLANEA_INVALID_GTT_INT_EN (1<<16)
+#define DPINVGTT_EN_MASK 0xff0000
+#define CURSORB_INVALID_GTT_STATUS (1<<7)
+#define CURSORA_INVALID_GTT_STATUS (1<<6)
+#define SPRITED_INVALID_GTT_STATUS (1<<5)
+#define SPRITEC_INVALID_GTT_STATUS (1<<4)
+#define PLANEB_INVALID_GTT_STATUS (1<<3)
+#define SPRITEB_INVALID_GTT_STATUS (1<<2)
+#define SPRITEA_INVALID_GTT_STATUS (1<<1)
+#define PLANEA_INVALID_GTT_STATUS (1<<0)
+#define DPINVGTT_STATUS_MASK 0xff
+
#define DSPARB 0x70030
#define DSPARB_CSTART_MASK (0x7f << 7)
#define DSPARB_CSTART_SHIFT 7
@@ -2492,11 +2637,28 @@
#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16)
#define DSPFW_HPLL_SR_MASK (0x1ff)
+/* drain latency register values */
+#define DRAIN_LATENCY_PRECISION_32 32
+#define DRAIN_LATENCY_PRECISION_16 16
+#define VLV_DDL1 0x70050
+#define DDL_CURSORA_PRECISION_32 (1<<31)
+#define DDL_CURSORA_PRECISION_16 (0<<31)
+#define DDL_CURSORA_SHIFT 24
+#define DDL_PLANEA_PRECISION_32 (1<<7)
+#define DDL_PLANEA_PRECISION_16 (0<<7)
+#define VLV_DDL2 0x70054
+#define DDL_CURSORB_PRECISION_32 (1<<31)
+#define DDL_CURSORB_PRECISION_16 (0<<31)
+#define DDL_CURSORB_SHIFT 24
+#define DDL_PLANEB_PRECISION_32 (1<<7)
+#define DDL_PLANEB_PRECISION_16 (0<<7)
+
/* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64
#define I915_FIFO_LINE_SIZE 64
#define I830_FIFO_LINE_SIZE 32
+#define VALLEYVIEW_FIFO_SIZE 255
#define G4X_FIFO_SIZE 127
#define I965_FIFO_SIZE 512
#define I945_FIFO_SIZE 127
@@ -2504,6 +2666,7 @@
#define I855GM_FIFO_SIZE 127 /* In cachelines */
#define I830_FIFO_SIZE 95
+#define VALLEYVIEW_MAX_WM 0xff
#define G4X_MAX_WM 0x3f
#define I915_MAX_WM 0x3f
@@ -2518,6 +2681,7 @@
#define PINEVIEW_CURSOR_DFT_WM 0
#define PINEVIEW_CURSOR_GUARD_WM 5
+#define VALLEYVIEW_CURSOR_MAX_WM 64
#define I965_CURSOR_FIFO 64
#define I965_CURSOR_MAX_WM 32
#define I965_CURSOR_DFT_WM 8
@@ -2726,6 +2890,13 @@
#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
+/* Display/Sprite base address macros */
+#define DISP_BASEADDR_MASK (0xfffff000)
+#define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK)
+#define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK)
+#define I915_MODIFY_DISPBASE(reg, gfx_addr) \
+ (I915_WRITE(reg, gfx_addr | I915_LO_DISPBASE(I915_READ(reg))))
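+/* e.g. I915_MODIFY_DISPBASE(DSPSURF(plane), surf_offset) would update only
+ * the page-aligned base address while keeping the low control/offset bits
+ * already in the register (illustrative usage; surf_offset is a stand-in
+ * name, not something this patch defines) */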
+
/* VBIOS flags */
#define SWF00 0x71410
#define SWF01 0x71414
@@ -3058,25 +3229,38 @@
#define DE_PCH_EVENT_IVB (1<<28)
#define DE_DP_A_HOTPLUG_IVB (1<<27)
#define DE_AUX_CHANNEL_A_IVB (1<<26)
+#define DE_SPRITEC_FLIP_DONE_IVB (1<<14)
+#define DE_PLANEC_FLIP_DONE_IVB (1<<13)
+#define DE_PIPEC_VBLANK_IVB (1<<10)
#define DE_SPRITEB_FLIP_DONE_IVB (1<<9)
-#define DE_SPRITEA_FLIP_DONE_IVB (1<<4)
#define DE_PLANEB_FLIP_DONE_IVB (1<<8)
-#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
#define DE_PIPEB_VBLANK_IVB (1<<5)
+#define DE_SPRITEA_FLIP_DONE_IVB (1<<4)
+#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
#define DE_PIPEA_VBLANK_IVB (1<<0)
+#define VLV_MASTER_IER 0x4400c /* Gunit master IER */
+#define MASTER_INTERRUPT_ENABLE (1<<31)
+
#define DEISR 0x44000
#define DEIMR 0x44004
#define DEIIR 0x44008
#define DEIER 0x4400c
-/* GT interrupt */
-#define GT_PIPE_NOTIFY (1 << 4)
-#define GT_SYNC_STATUS (1 << 2)
-#define GT_USER_INTERRUPT (1 << 0)
-#define GT_BSD_USER_INTERRUPT (1 << 5)
-#define GT_GEN6_BSD_USER_INTERRUPT (1 << 12)
-#define GT_BLT_USER_INTERRUPT (1 << 22)
+/* GT interrupt.
+ * Note that for gen6+ the ring-specific interrupt bits do alias with the
+ * corresponding bits in the per-ring interrupt control registers. */
+#define GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT (1 << 26)
+#define GT_GEN6_BLT_CS_ERROR_INTERRUPT (1 << 25)
+#define GT_GEN6_BLT_USER_INTERRUPT (1 << 22)
+#define GT_GEN6_BSD_CS_ERROR_INTERRUPT (1 << 15)
+#define GT_GEN6_BSD_USER_INTERRUPT (1 << 12)
+#define GT_BSD_USER_INTERRUPT (1 << 5) /* ilk only */
+#define GT_GEN7_L3_PARITY_ERROR_INTERRUPT (1 << 5)
+#define GT_PIPE_NOTIFY (1 << 4)
+#define GT_RENDER_CS_ERROR_INTERRUPT (1 << 3)
+#define GT_SYNC_STATUS (1 << 2)
+#define GT_USER_INTERRUPT (1 << 0)
#define GTISR 0x44010
#define GTIMR 0x44014
@@ -3226,15 +3410,15 @@
#define _PCH_DPLL_A 0xc6014
#define _PCH_DPLL_B 0xc6018
-#define PCH_DPLL(pipe) (pipe == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
+#define _PCH_DPLL(pll) (pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
#define _PCH_FPA0 0xc6040
#define FP_CB_TUNE (0x3<<22)
#define _PCH_FPA1 0xc6044
#define _PCH_FPB0 0xc6048
#define _PCH_FPB1 0xc604c
-#define PCH_FP0(pipe) (pipe == 0 ? _PCH_FPA0 : _PCH_FPB0)
-#define PCH_FP1(pipe) (pipe == 0 ? _PCH_FPA1 : _PCH_FPB1)
+#define _PCH_FP0(pll) (pll == 0 ? _PCH_FPA0 : _PCH_FPB0)
+#define _PCH_FP1(pll) (pll == 0 ? _PCH_FPA1 : _PCH_FPB1)
#define PCH_DPLL_TEST 0xc606c
@@ -3329,6 +3513,57 @@
#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
+#define VLV_VIDEO_DIP_CTL_A 0x60220
+#define VLV_VIDEO_DIP_DATA_A 0x60208
+#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210
+
+#define VLV_VIDEO_DIP_CTL_B 0x61170
+#define VLV_VIDEO_DIP_DATA_B 0x61174
+#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B 0x61178
+
+#define VLV_TVIDEO_DIP_CTL(pipe) \
+ _PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B)
+#define VLV_TVIDEO_DIP_DATA(pipe) \
+ _PIPE(pipe, VLV_VIDEO_DIP_DATA_A, VLV_VIDEO_DIP_DATA_B)
+#define VLV_TVIDEO_DIP_GCP(pipe) \
+ _PIPE(pipe, VLV_VIDEO_DIP_GDCP_PAYLOAD_A, VLV_VIDEO_DIP_GDCP_PAYLOAD_B)
+
+/* Haswell DIP controls */
+#define HSW_VIDEO_DIP_CTL_A 0x60200
+#define HSW_VIDEO_DIP_AVI_DATA_A 0x60220
+#define HSW_VIDEO_DIP_VS_DATA_A 0x60260
+#define HSW_VIDEO_DIP_SPD_DATA_A 0x602A0
+#define HSW_VIDEO_DIP_GMP_DATA_A 0x602E0
+#define HSW_VIDEO_DIP_VSC_DATA_A 0x60320
+#define HSW_VIDEO_DIP_AVI_ECC_A 0x60240
+#define HSW_VIDEO_DIP_VS_ECC_A 0x60280
+#define HSW_VIDEO_DIP_SPD_ECC_A 0x602C0
+#define HSW_VIDEO_DIP_GMP_ECC_A 0x60300
+#define HSW_VIDEO_DIP_VSC_ECC_A 0x60344
+#define HSW_VIDEO_DIP_GCP_A 0x60210
+
+#define HSW_VIDEO_DIP_CTL_B 0x61200
+#define HSW_VIDEO_DIP_AVI_DATA_B 0x61220
+#define HSW_VIDEO_DIP_VS_DATA_B 0x61260
+#define HSW_VIDEO_DIP_SPD_DATA_B 0x612A0
+#define HSW_VIDEO_DIP_GMP_DATA_B 0x612E0
+#define HSW_VIDEO_DIP_VSC_DATA_B 0x61320
+#define HSW_VIDEO_DIP_AVI_ECC_B 0x61240
+#define HSW_VIDEO_DIP_VS_ECC_B 0x61280
+#define HSW_VIDEO_DIP_SPD_ECC_B 0x612C0
+#define HSW_VIDEO_DIP_GMP_ECC_B 0x61300
+#define HSW_VIDEO_DIP_VSC_ECC_B 0x61344
+#define HSW_VIDEO_DIP_GCP_B 0x61210
+
+#define HSW_TVIDEO_DIP_CTL(pipe) \
+ _PIPE(pipe, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B)
+#define HSW_TVIDEO_DIP_AVI_DATA(pipe) \
+ _PIPE(pipe, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B)
+#define HSW_TVIDEO_DIP_SPD_DATA(pipe) \
+ _PIPE(pipe, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B)
+#define HSW_TVIDEO_DIP_GCP(pipe) \
+ _PIPE(pipe, HSW_VIDEO_DIP_GCP_A, HSW_VIDEO_DIP_GCP_B)
+
#define _TRANS_HTOTAL_B 0xe1000
#define _TRANS_HBLANK_B 0xe1004
#define _TRANS_HSYNC_B 0xe1008
@@ -3489,6 +3724,9 @@
#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2<<8)
#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8)
#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8)
+/* LPT */
+#define FDI_PORT_WIDTH_2X_LPT (1<<19)
+#define FDI_PORT_WIDTH_1X_LPT (0<<19)
#define _FDI_RXA_MISC 0xf0010
#define _FDI_RXB_MISC 0xf1010
@@ -3549,6 +3787,7 @@
#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
/* or SDVOB */
+#define VLV_HDMIB 0x61140
#define HDMIB 0xe1140
#define PORT_ENABLE (1 << 31)
#define TRANSCODER(pipe) ((pipe) << 30)
@@ -3714,6 +3953,8 @@
#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22)
#define FORCEWAKE 0xA18C
+#define FORCEWAKE_VLV 0x1300b0
+#define FORCEWAKE_ACK_VLV 0x1300b4
#define FORCEWAKE_ACK 0x130090
#define FORCEWAKE_MT 0xa188 /* multi-threaded */
#define FORCEWAKE_MT_ACK 0x130040
@@ -3731,6 +3972,7 @@
#define GEN6_UCGCTL1 0x9400
# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5)
+# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7)
#define GEN6_UCGCTL2 0x9404
# define GEN6_RCZUNIT_CLOCK_GATE_DISABLE (1 << 13)
@@ -3811,6 +4053,11 @@
GEN6_PM_RP_DOWN_THRESHOLD | \
GEN6_PM_RP_DOWN_TIMEOUT)
+#define GEN6_GT_GFX_RC6_LOCKED 0x138104
+#define GEN6_GT_GFX_RC6 0x138108
+#define GEN6_GT_GFX_RC6p 0x13810C
+#define GEN6_GT_GFX_RC6pp 0x138110
+
#define GEN6_PCODE_MAILBOX 0x138124
#define GEN6_PCODE_READY (1<<31)
#define GEN6_READ_OC_PARAMS 0xc
@@ -3870,4 +4117,197 @@
#define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16)
#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
+/* HSW Power Wells */
+#define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */
+#define HSW_PWR_WELL_CTL2 0x45404 /* Driver */
+#define HSW_PWR_WELL_CTL3 0x45408 /* KVMR */
+#define HSW_PWR_WELL_CTL4 0x4540C /* Debug */
+#define HSW_PWR_WELL_ENABLE (1<<31)
+#define HSW_PWR_WELL_STATE (1<<30)
+#define HSW_PWR_WELL_CTL5 0x45410
+#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31)
+#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20)
+#define HSW_PWR_WELL_FORCE_ON (1<<19)
+#define HSW_PWR_WELL_CTL6 0x45414
+
+/* Per-pipe DDI Function Control */
+#define PIPE_DDI_FUNC_CTL_A 0x60400
+#define PIPE_DDI_FUNC_CTL_B 0x61400
+#define PIPE_DDI_FUNC_CTL_C 0x62400
+#define PIPE_DDI_FUNC_CTL_EDP 0x6F400
+#define DDI_FUNC_CTL(pipe) _PIPE(pipe, \
+ PIPE_DDI_FUNC_CTL_A, \
+ PIPE_DDI_FUNC_CTL_B)
+#define PIPE_DDI_FUNC_ENABLE (1<<31)
+/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
+#define PIPE_DDI_PORT_MASK (0xf<<28)
+#define PIPE_DDI_SELECT_PORT(x) ((x)<<28)
+#define PIPE_DDI_MODE_SELECT_HDMI (0<<24)
+#define PIPE_DDI_MODE_SELECT_DVI (1<<24)
+#define PIPE_DDI_MODE_SELECT_DP_SST (2<<24)
+#define PIPE_DDI_MODE_SELECT_DP_MST (3<<24)
+#define PIPE_DDI_MODE_SELECT_FDI (4<<24)
+#define PIPE_DDI_BPC_8 (0<<20)
+#define PIPE_DDI_BPC_10 (1<<20)
+#define PIPE_DDI_BPC_6 (2<<20)
+#define PIPE_DDI_BPC_12 (3<<20)
+#define PIPE_DDI_BFI_ENABLE (1<<4)
+#define PIPE_DDI_PORT_WIDTH_X1 (0<<1)
+#define PIPE_DDI_PORT_WIDTH_X2 (1<<1)
+#define PIPE_DDI_PORT_WIDTH_X4 (3<<1)
+
+/* DisplayPort Transport Control */
+#define DP_TP_CTL_A 0x64040
+#define DP_TP_CTL_B 0x64140
+#define DP_TP_CTL(port) _PORT(port, \
+ DP_TP_CTL_A, \
+ DP_TP_CTL_B)
+#define DP_TP_CTL_ENABLE (1<<31)
+#define DP_TP_CTL_MODE_SST (0<<27)
+#define DP_TP_CTL_MODE_MST (1<<27)
+#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18)
+#define DP_TP_CTL_FDI_AUTOTRAIN (1<<15)
+#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8)
+#define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8)
+#define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8)
+#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8)
+
+/* DisplayPort Transport Status */
+#define DP_TP_STATUS_A 0x64044
+#define DP_TP_STATUS_B 0x64144
+#define DP_TP_STATUS(port) _PORT(port, \
+ DP_TP_STATUS_A, \
+ DP_TP_STATUS_B)
+#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
+
+/* DDI Buffer Control */
+#define DDI_BUF_CTL_A 0x64000
+#define DDI_BUF_CTL_B 0x64100
+#define DDI_BUF_CTL(port) _PORT(port, \
+ DDI_BUF_CTL_A, \
+ DDI_BUF_CTL_B)
+#define DDI_BUF_CTL_ENABLE (1<<31)
+#define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */
+#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */
+#define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */
+#define DDI_BUF_EMP_400MV_9_5DB_HSW (3<<24) /* Sel3 */
+#define DDI_BUF_EMP_600MV_0DB_HSW (4<<24) /* Sel4 */
+#define DDI_BUF_EMP_600MV_3_5DB_HSW (5<<24) /* Sel5 */
+#define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */
+#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */
+#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
+#define DDI_BUF_EMP_MASK (0xf<<24)
+#define DDI_BUF_IS_IDLE (1<<7)
+#define DDI_PORT_WIDTH_X1 (0<<1)
+#define DDI_PORT_WIDTH_X2 (1<<1)
+#define DDI_PORT_WIDTH_X4 (3<<1)
+#define DDI_INIT_DISPLAY_DETECTED (1<<0)
+
+/* DDI Buffer Translations */
+#define DDI_BUF_TRANS_A 0x64E00
+#define DDI_BUF_TRANS_B 0x64E60
+#define DDI_BUF_TRANS(port) _PORT(port, \
+ DDI_BUF_TRANS_A, \
+ DDI_BUF_TRANS_B)
+
+/* The Sideband Interface (SBI) is programmed indirectly: SBI_ADDR holds the
+ * target register offset, SBI_DATA carries the payload, and SBI_CTL_STAT
+ * selects the operation and reports busy/response status */
+#define SBI_ADDR 0xC6000
+#define SBI_DATA 0xC6004
+#define SBI_CTL_STAT 0xC6008
+#define SBI_CTL_OP_CRRD (0x6<<8)
+#define SBI_CTL_OP_CRWR (0x7<<8)
+#define SBI_RESPONSE_FAIL (0x1<<1)
+#define SBI_RESPONSE_SUCCESS (0x0<<1)
+#define SBI_BUSY (0x1<<0)
+#define SBI_READY (0x0<<0)
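+
+/* A sketch of the access sequence these bits imply (illustrative only, not
+ * part of this patch; the helper name is hypothetical and would live in a
+ * .c file):
+ *
+ *	static u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
+ *	{
+ *		u32 value = 0;
+ *
+ *		if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
+ *			DRM_ERROR("timeout waiting for SBI to become ready\n");
+ *			goto out;
+ *		}
+ *
+ *		I915_WRITE(SBI_ADDR, (u32)reg << 16);
+ *		I915_WRITE(SBI_CTL_STAT, SBI_CTL_OP_CRRD | SBI_BUSY);
+ *
+ *		if (wait_for((I915_READ(SBI_CTL_STAT) &
+ *			      (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, 100)) {
+ *			DRM_ERROR("timeout waiting for SBI read to complete\n");
+ *			goto out;
+ *		}
+ *
+ *		value = I915_READ(SBI_DATA);
+ *	out:
+ *		return value;
+ *	}
+ */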
+
+/* SBI offsets */
+#define SBI_SSCDIVINTPHASE6 0x0600
+#define SBI_SSCDIVINTPHASE_DIVSEL_MASK ((0x7f)<<1)
+#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1)
+#define SBI_SSCDIVINTPHASE_INCVAL_MASK ((0x7f)<<8)
+#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8)
+#define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15)
+#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0)
+#define SBI_SSCCTL 0x020c
+#define SBI_SSCCTL6 0x060C
+#define SBI_SSCCTL_DISABLE (1<<0)
+#define SBI_SSCAUXDIV6 0x0610
+#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
+#define SBI_DBUFF0 0x2a00
+
+/* LPT PIXCLK_GATE */
+#define PIXCLK_GATE 0xC6020
+#define PIXCLK_GATE_UNGATE (1<<0)
+#define PIXCLK_GATE_GATE (0<<0)
+
+/* SPLL */
+#define SPLL_CTL 0x46020
+#define SPLL_PLL_ENABLE (1<<31)
+#define SPLL_PLL_SSC (1<<28)
+#define SPLL_PLL_NON_SSC (2<<28)
+#define SPLL_PLL_FREQ_810MHz (0<<26)
+#define SPLL_PLL_FREQ_1350MHz (1<<26)
+
+/* WRPLL */
+#define WRPLL_CTL1 0x46040
+#define WRPLL_CTL2 0x46060
+#define WRPLL_PLL_ENABLE (1<<31)
+#define WRPLL_PLL_SELECT_SSC (0x01<<28)
+#define WRPLL_PLL_SELECT_NON_SSC (0x02<<28)
+#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28)
+/* WRPLL divider programming */
+#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
+#define WRPLL_DIVIDER_POST(x) ((x)<<8)
+#define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16)
+
+/* Port clock selection */
+#define PORT_CLK_SEL_A 0x46100
+#define PORT_CLK_SEL_B 0x46104
+#define PORT_CLK_SEL(port) _PORT(port, \
+ PORT_CLK_SEL_A, \
+ PORT_CLK_SEL_B)
+#define PORT_CLK_SEL_LCPLL_2700 (0<<29)
+#define PORT_CLK_SEL_LCPLL_1350 (1<<29)
+#define PORT_CLK_SEL_LCPLL_810 (2<<29)
+#define PORT_CLK_SEL_SPLL (3<<29)
+#define PORT_CLK_SEL_WRPLL1 (4<<29)
+#define PORT_CLK_SEL_WRPLL2 (5<<29)
+
+/* Pipe clock selection */
+#define PIPE_CLK_SEL_A 0x46140
+#define PIPE_CLK_SEL_B 0x46144
+#define PIPE_CLK_SEL(pipe) _PIPE(pipe, \
+ PIPE_CLK_SEL_A, \
+ PIPE_CLK_SEL_B)
+/* For each pipe, we need to select the corresponding port clock */
+#define PIPE_CLK_SEL_DISABLED (0x0<<29)
+#define PIPE_CLK_SEL_PORT(x) (((x)+1)<<29)
+
+/* LCPLL Control */
+#define LCPLL_CTL 0x130040
+#define LCPLL_PLL_DISABLE (1<<31)
+#define LCPLL_PLL_LOCK (1<<30)
+#define LCPLL_CD_CLOCK_DISABLE (1<<25)
+#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
+
+/* Pipe WM_LINETIME - watermark line time */
+#define PIPE_WM_LINETIME_A 0x45270
+#define PIPE_WM_LINETIME_B 0x45274
+#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, \
+					   PIPE_WM_LINETIME_A, \
+					   PIPE_WM_LINETIME_B)
+#define PIPE_WM_LINETIME_MASK (0x1ff)
+#define PIPE_WM_LINETIME_TIME(x) ((x))
+#define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff<<16)
+#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16)
+
+/* SFUSE_STRAP */
+#define SFUSE_STRAP 0xc2014
+#define SFUSE_STRAP_DDIB_DETECTED (1<<2)
+#define SFUSE_STRAP_DDIC_DETECTED (1<<1)
+#define SFUSE_STRAP_DDID_DETECTED (1<<0)
+
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 2b5eb229ff2..0ede02a99d9 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -40,7 +40,7 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
return false;
if (HAS_PCH_SPLIT(dev))
- dpll_reg = PCH_DPLL(pipe);
+ dpll_reg = _PCH_DPLL(pipe);
else
dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
@@ -876,22 +876,6 @@ int i915_restore_state(struct drm_device *dev)
I915_WRITE(IER, dev_priv->saveIER);
I915_WRITE(IMR, dev_priv->saveIMR);
}
- mutex_unlock(&dev->struct_mutex);
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- intel_init_clock_gating(dev);
-
- if (IS_IRONLAKE_M(dev)) {
- ironlake_enable_drps(dev);
- intel_init_emon(dev);
- }
-
- if (INTEL_INFO(dev)->gen >= 6) {
- gen6_enable_rps(dev_priv);
- gen6_update_ring_freq(dev_priv);
- }
-
- mutex_lock(&dev->struct_mutex);
/* Cache mode state */
I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
new file mode 100644
index 00000000000..79f83445afa
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Ben Widawsky <ben@bwidawsk.net>
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/sysfs.h>
+#include "i915_drv.h"
+
+static u32 calc_residency(struct drm_device *dev, const u32 reg)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u64 raw_time; /* 32b value may overflow during fixed point math */
+
+ if (!intel_enable_rc6(dev))
+ return 0;
+
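+	/* The counter ticks once per 1.28us, so ticks * 128 / 100000 converts
+	 * to milliseconds, matching the *_residency_ms attributes below. */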
+ raw_time = I915_READ(reg) * 128ULL;
+ return DIV_ROUND_UP_ULL(raw_time, 100000);
+}
+
+static ssize_t
+show_rc6_mask(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ return snprintf(buf, PAGE_SIZE, "%x", intel_enable_rc6(dminor->dev));
+}
+
+static ssize_t
+show_rc6_ms(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
+ return snprintf(buf, PAGE_SIZE, "%u", rc6_residency);
+}
+
+static ssize_t
+show_rc6p_ms(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
+ return snprintf(buf, PAGE_SIZE, "%u", rc6p_residency);
+}
+
+static ssize_t
+show_rc6pp_ms(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
+ return snprintf(buf, PAGE_SIZE, "%u", rc6pp_residency);
+}
+
+static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
+static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
+static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
+static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
+
+static struct attribute *rc6_attrs[] = {
+ &dev_attr_rc6_enable.attr,
+ &dev_attr_rc6_residency_ms.attr,
+ &dev_attr_rc6p_residency_ms.attr,
+ &dev_attr_rc6pp_residency_ms.attr,
+ NULL
+};
+
+static struct attribute_group rc6_attr_group = {
+ .name = power_group_name,
+ .attrs = rc6_attrs
+};
+
+void i915_setup_sysfs(struct drm_device *dev)
+{
+ int ret;
+
+ /* ILK doesn't have any residency information */
+ if (INTEL_INFO(dev)->gen < 6)
+ return;
+
+ ret = sysfs_merge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
+ if (ret)
+ DRM_ERROR("sysfs setup failed\n");
+}
+
+void i915_teardown_sysfs(struct drm_device *dev)
+{
+ sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
+}
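+
+/* With the merge into the device's existing "power" group above, these
+ * attributes are expected to appear as (illustrative paths)
+ * /sys/class/drm/card<N>/power/rc6_enable and
+ * /sys/class/drm/card<N>/power/rc6{,p,pp}_residency_ms on gen6+ hardware. */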
diff --git a/drivers/gpu/drm/i915/i915_trace_points.c b/drivers/gpu/drm/i915/i915_trace_points.c
index ead876eb6ea..f1df2bd4ecf 100644
--- a/drivers/gpu/drm/i915/i915_trace_points.c
+++ b/drivers/gpu/drm/i915/i915_trace_points.c
@@ -7,5 +7,7 @@
#include "i915_drv.h"
+#ifndef __CHECKER__
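+/* sparse (which defines __CHECKER__) cannot cope with the tracepoint
+ * machinery, so keep CREATE_TRACE_POINTS out of its sight */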
#define CREATE_TRACE_POINTS
#include "i915_trace.h"
+#endif
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index bae3edf956a..f413899475e 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -9,6 +9,7 @@
#include <acpi/acpi_drivers.h>
#include "drmP.h"
+#include "i915_drv.h"
#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
@@ -182,8 +183,6 @@ static void intel_dsm_platform_mux_info(void)
DRM_DEBUG_DRIVER(" hpd mux info: %s\n",
intel_dsm_mux_type(info->buffer.pointer[3]));
}
- } else {
- DRM_ERROR("MUX INFO call failed\n");
}
out:
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index b48fc2a8410..353459362f6 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -174,6 +174,28 @@ get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data,
return (struct lvds_dvo_timing *)(entry + dvo_timing_offset);
}
+/* get lvds_fp_timing entry
+ * this function may return NULL if the corresponding entry is invalid
+ */
+static const struct lvds_fp_timing *
+get_lvds_fp_timing(const struct bdb_header *bdb,
+ const struct bdb_lvds_lfp_data *data,
+ const struct bdb_lvds_lfp_data_ptrs *ptrs,
+ int index)
+{
+ size_t data_ofs = (const u8 *)data - (const u8 *)bdb;
+ u16 data_size = ((const u16 *)data)[-1]; /* stored in header */
+ size_t ofs;
+
+ if (index >= ARRAY_SIZE(ptrs->ptr))
+ return NULL;
+ ofs = ptrs->ptr[index].fp_timing_offset;
+ if (ofs < data_ofs ||
+ ofs + sizeof(struct lvds_fp_timing) > data_ofs + data_size)
+ return NULL;
+ return (const struct lvds_fp_timing *)((const u8 *)bdb + ofs);
+}
+
/* Try to find integrated panel data */
static void
parse_lfp_panel_data(struct drm_i915_private *dev_priv,
@@ -183,6 +205,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
const struct bdb_lvds_lfp_data *lvds_lfp_data;
const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
const struct lvds_dvo_timing *panel_dvo_timing;
+ const struct lvds_fp_timing *fp_timing;
struct drm_display_mode *panel_fixed_mode;
int i, downclock;
@@ -244,6 +267,19 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
"Normal Clock %dKHz, downclock %dKHz\n",
panel_fixed_mode->clock, 10*downclock);
}
+
+ fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
+ lvds_lfp_data_ptrs,
+ lvds_options->panel_type);
+ if (fp_timing) {
+ /* check the resolution, just to be sure */
+ if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
+ fp_timing->y_res == panel_fixed_mode->vdisplay) {
+ dev_priv->bios_lvds_val = fp_timing->lvds_reg_val;
+ DRM_DEBUG_KMS("VBT initial LVDS value %x\n",
+ dev_priv->bios_lvds_val);
+ }
+ }
}
/* Try to find sdvo panel data */
@@ -256,6 +292,11 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
int index;
index = i915_vbt_sdvo_panel_type;
+ if (index == -2) {
+ DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n");
+ return;
+ }
+
if (index == -1) {
struct bdb_sdvo_lvds_options *sdvo_lvds_options;
@@ -332,11 +373,11 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
if (block_size >= sizeof(*general)) {
int bus_pin = general->crt_ddc_gmbus_pin;
DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
- if (bus_pin >= 1 && bus_pin <= 6)
+ if (intel_gmbus_is_port_valid(bus_pin))
dev_priv->crt_ddc_pin = bus_pin;
} else {
DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
- block_size);
+ block_size);
}
}
}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 90b9793fd5d..75a70c46ef1 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -55,18 +55,36 @@ static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
struct intel_crt, base);
}
-static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
+static void pch_crt_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 temp, reg;
+ u32 temp;
- if (HAS_PCH_SPLIT(dev))
- reg = PCH_ADPA;
- else
- reg = ADPA;
+ temp = I915_READ(PCH_ADPA);
+ temp &= ~ADPA_DAC_ENABLE;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ temp |= ADPA_DAC_ENABLE;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ /* Just leave port enable cleared */
+ break;
+ }
+
+ I915_WRITE(PCH_ADPA, temp);
+}
- temp = I915_READ(reg);
+static void gmch_crt_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 temp;
+
+ temp = I915_READ(ADPA);
temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
temp &= ~ADPA_DAC_ENABLE;
@@ -85,7 +103,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
break;
}
- I915_WRITE(reg, temp);
+ I915_WRITE(ADPA, temp);
}
static int intel_crt_mode_valid(struct drm_connector *connector,
@@ -278,9 +296,10 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) {
struct edid *edid;
bool is_digital = false;
+ struct i2c_adapter *i2c;
- edid = drm_get_edid(connector,
- &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+ i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
+ edid = drm_get_edid(connector, i2c);
/*
* This may be a DVI-I connector with a shared DDC
* link between analog and digital outputs, so we
@@ -476,15 +495,16 @@ static int intel_crt_get_modes(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
+ struct i2c_adapter *i2c;
- ret = intel_ddc_get_modes(connector,
- &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+ i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
+ ret = intel_ddc_get_modes(connector, i2c);
if (ret || !IS_G4X(dev))
return ret;
/* Try to probe digital port for output in DVI-I -> VGA mode. */
- return intel_ddc_get_modes(connector,
- &dev_priv->gmbus[GMBUS_PORT_DPB].adapter);
+ i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
+ return intel_ddc_get_modes(connector, i2c);
}
static int intel_crt_set_property(struct drm_connector *connector,
@@ -507,12 +527,20 @@ static void intel_crt_reset(struct drm_connector *connector)
* Routines for controlling stuff on the analog port
*/
-static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = {
- .dpms = intel_crt_dpms,
+static const struct drm_encoder_helper_funcs pch_encoder_funcs = {
.mode_fixup = intel_crt_mode_fixup,
.prepare = intel_encoder_prepare,
.commit = intel_encoder_commit,
.mode_set = intel_crt_mode_set,
+ .dpms = pch_crt_dpms,
+};
+
+static const struct drm_encoder_helper_funcs gmch_encoder_funcs = {
+ .mode_fixup = intel_crt_mode_fixup,
+ .prepare = intel_encoder_prepare,
+ .commit = intel_encoder_commit,
+ .mode_set = intel_crt_mode_set,
+ .dpms = gmch_crt_dpms,
};
static const struct drm_connector_funcs intel_crt_connector_funcs = {
@@ -536,7 +564,7 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
static int __init intel_no_crt_dmi_callback(const struct dmi_system_id *id)
{
- DRM_DEBUG_KMS("Skipping CRT initialization for %s\n", id->ident);
+ DRM_INFO("Skipping CRT initialization for %s\n", id->ident);
return 1;
}
@@ -558,6 +586,7 @@ void intel_crt_init(struct drm_device *dev)
struct intel_crt *crt;
struct intel_connector *intel_connector;
struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct drm_encoder_helper_funcs *encoder_helper_funcs;
/* Skip machines without VGA that falsely report hotplug events */
if (dmi_check_system(intel_no_crt))
@@ -586,14 +615,23 @@ void intel_crt_init(struct drm_device *dev)
crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT |
1 << INTEL_ANALOG_CLONE_BIT |
1 << INTEL_SDVO_LVDS_CLONE_BIT);
- crt->base.crtc_mask = (1 << 0) | (1 << 1);
+ if (IS_HASWELL(dev))
+ crt->base.crtc_mask = (1 << 0);
+ else
+ crt->base.crtc_mask = (1 << 0) | (1 << 1);
+
if (IS_GEN2(dev))
connector->interlace_allowed = 0;
else
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
- drm_encoder_helper_add(&crt->base.base, &intel_crt_helper_funcs);
+ if (HAS_PCH_SPLIT(dev))
+ encoder_helper_funcs = &pch_encoder_funcs;
+ else
+ encoder_helper_funcs = &gmch_encoder_funcs;
+
+ drm_encoder_helper_add(&crt->base.base, encoder_helper_funcs);
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
drm_sysfs_connector_add(connector);
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
new file mode 100644
index 00000000000..46d1e886c69
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -0,0 +1,755 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eugeni Dodonov <eugeni.dodonov@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+/* HDMI/DVI modes ignore everything but the last 2 items. So we share
+ * them for both DP and FDI transports, allowing those ports to
+ * automatically adapt to HDMI connections as well
+ */
+static const u32 hsw_ddi_translations_dp[] = {
+ 0x00FFFFFF, 0x0006000E, /* DP parameters */
+ 0x00D75FFF, 0x0005000A,
+ 0x00C30FFF, 0x00040006,
+ 0x80AAAFFF, 0x000B0000,
+ 0x00FFFFFF, 0x0005000A,
+ 0x00D75FFF, 0x000C0004,
+ 0x80C30FFF, 0x000B0000,
+ 0x00FFFFFF, 0x00040006,
+ 0x80D75FFF, 0x000B0000,
+ 0x00FFFFFF, 0x00040006 /* HDMI parameters */
+};
+
+static const u32 hsw_ddi_translations_fdi[] = {
+ 0x00FFFFFF, 0x0007000E, /* FDI parameters */
+ 0x00D75FFF, 0x000F000A,
+ 0x00C30FFF, 0x00060006,
+ 0x00AAAFFF, 0x001E0000,
+ 0x00FFFFFF, 0x000F000A,
+ 0x00D75FFF, 0x00160004,
+ 0x00C30FFF, 0x001E0000,
+ 0x00FFFFFF, 0x00060006,
+ 0x00D75FFF, 0x001E0000,
+ 0x00FFFFFF, 0x00040006 /* HDMI parameters */
+};
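+
+/* Each pair in the tables above is one 2-dword buffer translation entry:
+ * nine DP (or FDI) voltage-swing/emphasis levels followed by the single
+ * shared HDMI/DVI entry at the end. */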
+
+/* On Haswell, DDI port buffers must be programmed with correct values
+ * in advance. The buffer values are different for FDI and DP modes,
+ * but the HDMI/DVI fields are shared among those. So we program the DDI
+ * in either FDI or DP modes only, as HDMI connections will work with both
+ * of those
+ */
+void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
+			       bool use_fdi_mode)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg;
+ int i;
+ const u32 *ddi_translations = ((use_fdi_mode) ?
+ hsw_ddi_translations_fdi :
+ hsw_ddi_translations_dp);
+
+ DRM_DEBUG_DRIVER("Initializing DDI buffers for port %c in %s mode\n",
+ port_name(port),
+ use_fdi_mode ? "FDI" : "DP");
+
+ WARN((use_fdi_mode && (port != PORT_E)),
+ "Programming port %c in FDI mode, this probably will not work.\n",
+ port_name(port));
+
+	for (i = 0, reg = DDI_BUF_TRANS(port);
+	     i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
+ I915_WRITE(reg, ddi_translations[i]);
+ reg += 4;
+ }
+}
+
+/* Program DDI buffers translations for DP. By default, program ports A-D in DP
+ * mode and port E for FDI.
+ */
+void intel_prepare_ddi(struct drm_device *dev)
+{
+ int port;
+
+ if (IS_HASWELL(dev)) {
+ for (port = PORT_A; port < PORT_E; port++)
+ intel_prepare_ddi_buffers(dev, port, false);
+
+	/* DDI E is the suggested one to work in FDI mode, so program it as
+	 * such by default. It will have to be re-programmed if a DP output
+	 * is detected on it instead.
+	 */
+ intel_prepare_ddi_buffers(dev, PORT_E, true);
+ }
+}
+
+static const long hsw_ddi_buf_ctl_values[] = {
+ DDI_BUF_EMP_400MV_0DB_HSW,
+ DDI_BUF_EMP_400MV_3_5DB_HSW,
+ DDI_BUF_EMP_400MV_6DB_HSW,
+ DDI_BUF_EMP_400MV_9_5DB_HSW,
+ DDI_BUF_EMP_600MV_0DB_HSW,
+ DDI_BUF_EMP_600MV_3_5DB_HSW,
+ DDI_BUF_EMP_600MV_6DB_HSW,
+ DDI_BUF_EMP_800MV_0DB_HSW,
+ DDI_BUF_EMP_800MV_3_5DB_HSW
+};
+
+
+/* Starting with Haswell, different DDI ports can work in FDI mode for
+ * connection to the PCH-located connectors. For this, it is necessary to train
+ * both the DDI port and PCH receiver for the desired DDI buffer settings.
+ *
+ * The recommended port to work in FDI mode is DDI E, which we use here. Also,
+ * please note that when FDI mode is active on DDI E, it shares 2 lines with
+ * DDI A (which is used for eDP)
+ */
+
+void hsw_fdi_link_train(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp, i;
+
+ /* Configure CPU PLL, wait for warmup */
+ I915_WRITE(SPLL_CTL,
+ SPLL_PLL_ENABLE |
+ SPLL_PLL_FREQ_1350MHz |
+		   SPLL_PLL_SSC);
+
+ /* Use SPLL to drive the output when in FDI mode */
+ I915_WRITE(PORT_CLK_SEL(PORT_E),
+ PORT_CLK_SEL_SPLL);
+ I915_WRITE(PIPE_CLK_SEL(pipe),
+ PIPE_CLK_SEL_PORT(PORT_E));
+
+ udelay(20);
+
+ /* Start the training iterating through available voltages and emphasis */
+	for (i = 0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values); i++) {
+ /* Configure DP_TP_CTL with auto-training */
+ I915_WRITE(DP_TP_CTL(PORT_E),
+ DP_TP_CTL_FDI_AUTOTRAIN |
+ DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+ DP_TP_CTL_LINK_TRAIN_PAT1 |
+ DP_TP_CTL_ENABLE);
+
+ /* Configure and enable DDI_BUF_CTL for DDI E with next voltage */
+ temp = I915_READ(DDI_BUF_CTL(PORT_E));
+ temp = (temp & ~DDI_BUF_EMP_MASK);
+ I915_WRITE(DDI_BUF_CTL(PORT_E),
+ temp |
+ DDI_BUF_CTL_ENABLE |
+ DDI_PORT_WIDTH_X2 |
+ hsw_ddi_buf_ctl_values[i]);
+
+ udelay(600);
+
+ /* Enable CPU FDI Receiver with auto-training */
+ reg = FDI_RX_CTL(pipe);
+ I915_WRITE(reg,
+ I915_READ(reg) |
+ FDI_LINK_TRAIN_AUTO |
+ FDI_RX_ENABLE |
+ FDI_LINK_TRAIN_PATTERN_1_CPT |
+ FDI_RX_ENHANCE_FRAME_ENABLE |
+ FDI_PORT_WIDTH_2X_LPT |
+ FDI_RX_PLL_ENABLE);
+ POSTING_READ(reg);
+ udelay(100);
+
+ temp = I915_READ(DP_TP_STATUS(PORT_E));
+ if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
+ DRM_DEBUG_DRIVER("BUF_CTL training done on %d step\n", i);
+
+ /* Enable normal pixel sending for FDI */
+ I915_WRITE(DP_TP_CTL(PORT_E),
+ DP_TP_CTL_FDI_AUTOTRAIN |
+ DP_TP_CTL_LINK_TRAIN_NORMAL |
+ DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+ DP_TP_CTL_ENABLE);
+
+ /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in FDI mode */
+ temp = I915_READ(DDI_FUNC_CTL(pipe));
+ temp &= ~PIPE_DDI_PORT_MASK;
+ temp |= PIPE_DDI_SELECT_PORT(PORT_E) |
+ PIPE_DDI_MODE_SELECT_FDI |
+ PIPE_DDI_FUNC_ENABLE |
+ PIPE_DDI_PORT_WIDTH_X2;
+ I915_WRITE(DDI_FUNC_CTL(pipe),
+ temp);
+ break;
+ } else {
+ DRM_ERROR("Error training BUF_CTL %d\n", i);
+
+			/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
+ I915_WRITE(DP_TP_CTL(PORT_E),
+ I915_READ(DP_TP_CTL(PORT_E)) &
+ ~DP_TP_CTL_ENABLE);
+ I915_WRITE(FDI_RX_CTL(pipe),
+ I915_READ(FDI_RX_CTL(pipe)) &
+ ~FDI_RX_PLL_ENABLE);
+ continue;
+ }
+ }
+
+ DRM_DEBUG_KMS("FDI train done.\n");
+}
+
+/* For DDI connections, it is possible to support different outputs over the
+ * same DDI port, such as HDMI or DP or even VGA via FDI. So we don't know by
+ * the time the output is detected what exactly is on the other end of it. This
+ * function aims at providing support for this detection and proper output
+ * configuration.
+ */
+void intel_ddi_init(struct drm_device *dev, enum port port)
+{
+ /* For now, we don't do any proper output detection and assume that we
+ * handle HDMI only */
+
+	switch (port) {
+ case PORT_A:
+ /* We don't handle eDP and DP yet */
+ DRM_DEBUG_DRIVER("Found digital output on DDI port A\n");
+ break;
+ /* Assume that the ports B, C and D are working in HDMI mode for now */
+ case PORT_B:
+ case PORT_C:
+ case PORT_D:
+ intel_hdmi_init(dev, DDI_BUF_CTL(port));
+ break;
+ default:
+ DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n",
+ port);
+ break;
+ }
+}
+
+/* WRPLL clock dividers */
+struct wrpll_tmds_clock {
+ u32 clock;
+ u16 p; /* Post divider */
+ u16 n2; /* Feedback divider */
+ u16 r2; /* Reference divider */
+};
+
+/* Table of matching values for WRPLL clocks programming for each frequency */
+static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
+ {19750, 38, 25, 18},
+ {20000, 48, 32, 18},
+ {21000, 36, 21, 15},
+ {21912, 42, 29, 17},
+ {22000, 36, 22, 15},
+ {23000, 36, 23, 15},
+ {23500, 40, 40, 23},
+ {23750, 26, 16, 14},
+ {24000, 36, 24, 15},
+ {25000, 36, 25, 15},
+ {25175, 26, 40, 33},
+ {25200, 30, 21, 15},
+ {26000, 36, 26, 15},
+ {27000, 30, 21, 14},
+ {27027, 18, 100, 111},
+ {27500, 30, 29, 19},
+ {28000, 34, 30, 17},
+ {28320, 26, 30, 22},
+ {28322, 32, 42, 25},
+ {28750, 24, 23, 18},
+ {29000, 30, 29, 18},
+ {29750, 32, 30, 17},
+ {30000, 30, 25, 15},
+ {30750, 30, 41, 24},
+ {31000, 30, 31, 18},
+ {31500, 30, 28, 16},
+ {32000, 30, 32, 18},
+ {32500, 28, 32, 19},
+ {33000, 24, 22, 15},
+ {34000, 28, 30, 17},
+ {35000, 26, 32, 19},
+ {35500, 24, 30, 19},
+ {36000, 26, 26, 15},
+ {36750, 26, 46, 26},
+ {37000, 24, 23, 14},
+ {37762, 22, 40, 26},
+ {37800, 20, 21, 15},
+ {38000, 24, 27, 16},
+ {38250, 24, 34, 20},
+ {39000, 24, 26, 15},
+ {40000, 24, 32, 18},
+ {40500, 20, 21, 14},
+ {40541, 22, 147, 89},
+ {40750, 18, 19, 14},
+ {41000, 16, 17, 14},
+ {41500, 22, 44, 26},
+ {41540, 22, 44, 26},
+ {42000, 18, 21, 15},
+ {42500, 22, 45, 26},
+ {43000, 20, 43, 27},
+ {43163, 20, 24, 15},
+ {44000, 18, 22, 15},
+ {44900, 20, 108, 65},
+ {45000, 20, 25, 15},
+ {45250, 20, 52, 31},
+ {46000, 18, 23, 15},
+ {46750, 20, 45, 26},
+ {47000, 20, 40, 23},
+ {48000, 18, 24, 15},
+ {49000, 18, 49, 30},
+ {49500, 16, 22, 15},
+ {50000, 18, 25, 15},
+ {50500, 18, 32, 19},
+ {51000, 18, 34, 20},
+ {52000, 18, 26, 15},
+ {52406, 14, 34, 25},
+ {53000, 16, 22, 14},
+ {54000, 16, 24, 15},
+ {54054, 16, 173, 108},
+ {54500, 14, 24, 17},
+ {55000, 12, 22, 18},
+ {56000, 14, 45, 31},
+ {56250, 16, 25, 15},
+ {56750, 14, 25, 17},
+ {57000, 16, 27, 16},
+ {58000, 16, 43, 25},
+ {58250, 16, 38, 22},
+ {58750, 16, 40, 23},
+ {59000, 14, 26, 17},
+ {59341, 14, 40, 26},
+ {59400, 16, 44, 25},
+ {60000, 16, 32, 18},
+ {60500, 12, 39, 29},
+ {61000, 14, 49, 31},
+ {62000, 14, 37, 23},
+ {62250, 14, 42, 26},
+ {63000, 12, 21, 15},
+ {63500, 14, 28, 17},
+ {64000, 12, 27, 19},
+ {65000, 14, 32, 19},
+ {65250, 12, 29, 20},
+ {65500, 12, 32, 22},
+ {66000, 12, 22, 15},
+ {66667, 14, 38, 22},
+ {66750, 10, 21, 17},
+ {67000, 14, 33, 19},
+ {67750, 14, 58, 33},
+ {68000, 14, 30, 17},
+ {68179, 14, 46, 26},
+ {68250, 14, 46, 26},
+ {69000, 12, 23, 15},
+ {70000, 12, 28, 18},
+ {71000, 12, 30, 19},
+ {72000, 12, 24, 15},
+ {73000, 10, 23, 17},
+ {74000, 12, 23, 14},
+ {74176, 8, 100, 91},
+ {74250, 10, 22, 16},
+ {74481, 12, 43, 26},
+ {74500, 10, 29, 21},
+ {75000, 12, 25, 15},
+ {75250, 10, 39, 28},
+ {76000, 12, 27, 16},
+ {77000, 12, 53, 31},
+ {78000, 12, 26, 15},
+ {78750, 12, 28, 16},
+ {79000, 10, 38, 26},
+ {79500, 10, 28, 19},
+ {80000, 12, 32, 18},
+ {81000, 10, 21, 14},
+ {81081, 6, 100, 111},
+ {81624, 8, 29, 24},
+ {82000, 8, 17, 14},
+ {83000, 10, 40, 26},
+ {83950, 10, 28, 18},
+ {84000, 10, 28, 18},
+ {84750, 6, 16, 17},
+ {85000, 6, 17, 18},
+ {85250, 10, 30, 19},
+ {85750, 10, 27, 17},
+ {86000, 10, 43, 27},
+ {87000, 10, 29, 18},
+ {88000, 10, 44, 27},
+ {88500, 10, 41, 25},
+ {89000, 10, 28, 17},
+ {89012, 6, 90, 91},
+ {89100, 10, 33, 20},
+ {90000, 10, 25, 15},
+ {91000, 10, 32, 19},
+ {92000, 10, 46, 27},
+ {93000, 10, 31, 18},
+ {94000, 10, 40, 23},
+ {94500, 10, 28, 16},
+ {95000, 10, 44, 25},
+ {95654, 10, 39, 22},
+ {95750, 10, 39, 22},
+ {96000, 10, 32, 18},
+ {97000, 8, 23, 16},
+ {97750, 8, 42, 29},
+ {98000, 8, 45, 31},
+ {99000, 8, 22, 15},
+ {99750, 8, 34, 23},
+ {100000, 6, 20, 18},
+ {100500, 6, 19, 17},
+ {101000, 6, 37, 33},
+ {101250, 8, 21, 14},
+ {102000, 6, 17, 15},
+ {102250, 6, 25, 22},
+ {103000, 8, 29, 19},
+ {104000, 8, 37, 24},
+ {105000, 8, 28, 18},
+ {106000, 8, 22, 14},
+ {107000, 8, 46, 29},
+ {107214, 8, 27, 17},
+ {108000, 8, 24, 15},
+ {108108, 8, 173, 108},
+ {109000, 6, 23, 19},
+ {110000, 6, 22, 18},
+ {110013, 6, 22, 18},
+ {110250, 8, 49, 30},
+ {110500, 8, 36, 22},
+ {111000, 8, 23, 14},
+ {111264, 8, 150, 91},
+ {111375, 8, 33, 20},
+ {112000, 8, 63, 38},
+ {112500, 8, 25, 15},
+ {113100, 8, 57, 34},
+ {113309, 8, 42, 25},
+ {114000, 8, 27, 16},
+ {115000, 6, 23, 18},
+ {116000, 8, 43, 25},
+ {117000, 8, 26, 15},
+ {117500, 8, 40, 23},
+ {118000, 6, 38, 29},
+ {119000, 8, 30, 17},
+ {119500, 8, 46, 26},
+ {119651, 8, 39, 22},
+ {120000, 8, 32, 18},
+ {121000, 6, 39, 29},
+ {121250, 6, 31, 23},
+ {121750, 6, 23, 17},
+ {122000, 6, 42, 31},
+ {122614, 6, 30, 22},
+ {123000, 6, 41, 30},
+ {123379, 6, 37, 27},
+ {124000, 6, 51, 37},
+ {125000, 6, 25, 18},
+ {125250, 4, 13, 14},
+ {125750, 4, 27, 29},
+ {126000, 6, 21, 15},
+ {127000, 6, 24, 17},
+ {127250, 6, 41, 29},
+ {128000, 6, 27, 19},
+ {129000, 6, 43, 30},
+ {129859, 4, 25, 26},
+ {130000, 6, 26, 18},
+ {130250, 6, 42, 29},
+ {131000, 6, 32, 22},
+ {131500, 6, 38, 26},
+ {131850, 6, 41, 28},
+ {132000, 6, 22, 15},
+ {132750, 6, 28, 19},
+ {133000, 6, 34, 23},
+ {133330, 6, 37, 25},
+ {134000, 6, 61, 41},
+ {135000, 6, 21, 14},
+ {135250, 6, 167, 111},
+ {136000, 6, 62, 41},
+ {137000, 6, 35, 23},
+ {138000, 6, 23, 15},
+ {138500, 6, 40, 26},
+ {138750, 6, 37, 24},
+ {139000, 6, 34, 22},
+ {139050, 6, 34, 22},
+ {139054, 6, 34, 22},
+ {140000, 6, 28, 18},
+ {141000, 6, 36, 23},
+ {141500, 6, 22, 14},
+ {142000, 6, 30, 19},
+ {143000, 6, 27, 17},
+ {143472, 4, 17, 16},
+ {144000, 6, 24, 15},
+ {145000, 6, 29, 18},
+ {146000, 6, 47, 29},
+ {146250, 6, 26, 16},
+ {147000, 6, 49, 30},
+ {147891, 6, 23, 14},
+ {148000, 6, 23, 14},
+ {148250, 6, 28, 17},
+ {148352, 4, 100, 91},
+ {148500, 6, 33, 20},
+ {149000, 6, 48, 29},
+ {150000, 6, 25, 15},
+ {151000, 4, 19, 17},
+ {152000, 6, 27, 16},
+ {152280, 6, 44, 26},
+ {153000, 6, 34, 20},
+ {154000, 6, 53, 31},
+ {155000, 6, 31, 18},
+ {155250, 6, 50, 29},
+ {155750, 6, 45, 26},
+ {156000, 6, 26, 15},
+ {157000, 6, 61, 35},
+ {157500, 6, 28, 16},
+ {158000, 6, 65, 37},
+ {158250, 6, 44, 25},
+ {159000, 6, 53, 30},
+ {159500, 6, 39, 22},
+ {160000, 6, 32, 18},
+ {161000, 4, 31, 26},
+ {162000, 4, 18, 15},
+ {162162, 4, 131, 109},
+ {162500, 4, 53, 44},
+ {163000, 4, 29, 24},
+ {164000, 4, 17, 14},
+ {165000, 4, 22, 18},
+ {166000, 4, 32, 26},
+ {167000, 4, 26, 21},
+ {168000, 4, 46, 37},
+ {169000, 4, 104, 83},
+ {169128, 4, 64, 51},
+ {169500, 4, 39, 31},
+ {170000, 4, 34, 27},
+ {171000, 4, 19, 15},
+ {172000, 4, 51, 40},
+ {172750, 4, 32, 25},
+ {172800, 4, 32, 25},
+ {173000, 4, 41, 32},
+ {174000, 4, 49, 38},
+ {174787, 4, 22, 17},
+ {175000, 4, 35, 27},
+ {176000, 4, 30, 23},
+ {177000, 4, 38, 29},
+ {178000, 4, 29, 22},
+ {178500, 4, 37, 28},
+ {179000, 4, 53, 40},
+ {179500, 4, 73, 55},
+ {180000, 4, 20, 15},
+ {181000, 4, 55, 41},
+ {182000, 4, 31, 23},
+ {183000, 4, 42, 31},
+ {184000, 4, 30, 22},
+ {184750, 4, 26, 19},
+ {185000, 4, 37, 27},
+ {186000, 4, 51, 37},
+ {187000, 4, 36, 26},
+ {188000, 4, 32, 23},
+ {189000, 4, 21, 15},
+ {190000, 4, 38, 27},
+ {190960, 4, 41, 29},
+ {191000, 4, 41, 29},
+ {192000, 4, 27, 19},
+ {192250, 4, 37, 26},
+ {193000, 4, 20, 14},
+ {193250, 4, 53, 37},
+ {194000, 4, 23, 16},
+ {194208, 4, 23, 16},
+ {195000, 4, 26, 18},
+ {196000, 4, 45, 31},
+ {197000, 4, 35, 24},
+ {197750, 4, 41, 28},
+ {198000, 4, 22, 15},
+ {198500, 4, 25, 17},
+ {199000, 4, 28, 19},
+ {200000, 4, 37, 25},
+ {201000, 4, 61, 41},
+ {202000, 4, 112, 75},
+ {202500, 4, 21, 14},
+ {203000, 4, 146, 97},
+ {204000, 4, 62, 41},
+ {204750, 4, 44, 29},
+ {205000, 4, 38, 25},
+ {206000, 4, 29, 19},
+ {207000, 4, 23, 15},
+ {207500, 4, 40, 26},
+ {208000, 4, 37, 24},
+ {208900, 4, 48, 31},
+ {209000, 4, 48, 31},
+ {209250, 4, 31, 20},
+ {210000, 4, 28, 18},
+ {211000, 4, 25, 16},
+ {212000, 4, 22, 14},
+ {213000, 4, 30, 19},
+ {213750, 4, 38, 24},
+ {214000, 4, 46, 29},
+ {214750, 4, 35, 22},
+ {215000, 4, 43, 27},
+ {216000, 4, 24, 15},
+ {217000, 4, 37, 23},
+ {218000, 4, 42, 26},
+ {218250, 4, 42, 26},
+ {218750, 4, 34, 21},
+ {219000, 4, 47, 29},
+ {220000, 4, 44, 27},
+ {220640, 4, 49, 30},
+ {220750, 4, 36, 22},
+ {221000, 4, 36, 22},
+ {222000, 4, 23, 14},
+ {222525, 4, 28, 17},
+ {222750, 4, 33, 20},
+ {227000, 4, 37, 22},
+ {230250, 4, 29, 17},
+ {233500, 4, 38, 22},
+ {235000, 4, 40, 23},
+ {238000, 4, 30, 17},
+ {241500, 2, 17, 19},
+ {245250, 2, 20, 22},
+ {247750, 2, 22, 24},
+ {253250, 2, 15, 16},
+ {256250, 2, 18, 19},
+ {262500, 2, 31, 32},
+ {267250, 2, 66, 67},
+ {268500, 2, 94, 95},
+ {270000, 2, 14, 14},
+ {272500, 2, 77, 76},
+ {273750, 2, 57, 56},
+ {280750, 2, 24, 23},
+ {281250, 2, 23, 22},
+ {286000, 2, 17, 16},
+ {291750, 2, 26, 24},
+ {296703, 2, 56, 51},
+ {297000, 2, 22, 20},
+ {298000, 2, 21, 19},
+};
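+
+/* Illustrative helper, not part of this patch: approximate WRPLL output in
+ * kHz for a table entry, assuming the 2700MHz LCPLL reference selected via
+ * WRPLL_PLL_SELECT_LCPLL_2700 as used below. Spot-checking the table against
+ * this formula, entries land within roughly 1% of the nominal TMDS clock,
+ * e.g. {148352, 4, 100, 91}: 2700000 * 100 / (5 * 4 * 91) = 148351 kHz.
+ */
+static u32 wrpll_calc_clock(const struct wrpll_tmds_clock *e)
+{
+	/* 2700000 * n2 can overflow 32 bits, so divide in 64-bit math */
+	return div_u64(2700000ULL * e->n2, 5 * e->p * e->r2);
+}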
+
+void intel_ddi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ int port = intel_hdmi->ddi_port;
+ int pipe = intel_crtc->pipe;
+	int p, n2, r2, valid = 0;
+ u32 temp, i;
+
+ /* On Haswell, we need to enable the clocks and prepare DDI function to
+ * work in HDMI mode for this pipe.
+ */
+	DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n",
+		      port_name(port), pipe_name(pipe));
+
+	for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++) {
+ if (crtc->mode.clock == wrpll_tmds_clock_table[i].clock) {
+ p = wrpll_tmds_clock_table[i].p;
+ n2 = wrpll_tmds_clock_table[i].n2;
+ r2 = wrpll_tmds_clock_table[i].r2;
+
+			DRM_DEBUG_KMS("WR PLL: found settings for %dKHz pixel clock: p=%d, n2=%d, r2=%d\n",
+				      crtc->mode.clock,
+				      p, n2, r2);
+
+ valid = 1;
+ break;
+ }
+ }
+
+ if (!valid) {
+		DRM_ERROR("Unable to find WR PLL settings for %dKHz pixel clock\n",
+			  crtc->mode.clock);
+ return;
+ }
+
+ /* Enable LCPLL if disabled */
+ temp = I915_READ(LCPLL_CTL);
+ if (temp & LCPLL_PLL_DISABLE)
+ I915_WRITE(LCPLL_CTL,
+ temp & ~LCPLL_PLL_DISABLE);
+
+ /* Configure WR PLL 1, program the correct divider values for
+ * the desired frequency and wait for warmup */
+ I915_WRITE(WRPLL_CTL1,
+ WRPLL_PLL_ENABLE |
+ WRPLL_PLL_SELECT_LCPLL_2700 |
+ WRPLL_DIVIDER_REFERENCE(r2) |
+ WRPLL_DIVIDER_FEEDBACK(n2) |
+ WRPLL_DIVIDER_POST(p));
+
+ udelay(20);
+
+ /* Use WRPLL1 clock to drive the output to the port, and tell the pipe to use
+ * this port for connection.
+ */
+ I915_WRITE(PORT_CLK_SEL(port),
+ PORT_CLK_SEL_WRPLL1);
+ I915_WRITE(PIPE_CLK_SEL(pipe),
+ PIPE_CLK_SEL_PORT(port));
+
+ udelay(20);
+
+ if (intel_hdmi->has_audio) {
+		/* Proper support for digital audio needs new logic and a new
+		 * set of registers, so we leave it for future patch bombing.
+		 */
+ DRM_DEBUG_DRIVER("HDMI audio on pipe %c not yet supported on DDI\n",
+ pipe_name(intel_crtc->pipe));
+ }
+
+ /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */
+ temp = I915_READ(DDI_FUNC_CTL(pipe));
+ temp &= ~PIPE_DDI_PORT_MASK;
+ temp &= ~PIPE_DDI_BPC_12;
+ temp |= PIPE_DDI_SELECT_PORT(port) |
+ PIPE_DDI_MODE_SELECT_HDMI |
+ ((intel_crtc->bpp > 24) ?
+ PIPE_DDI_BPC_12 :
+ PIPE_DDI_BPC_8) |
+ PIPE_DDI_FUNC_ENABLE;
+
+ I915_WRITE(DDI_FUNC_CTL(pipe), temp);
+
+ intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_spd_infoframe(encoder);
+}
+
+void intel_ddi_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ int port = intel_hdmi->ddi_port;
+ u32 temp;
+
+ temp = I915_READ(DDI_BUF_CTL(port));
+
+	if (mode == DRM_MODE_DPMS_ON)
+		temp |= DDI_BUF_CTL_ENABLE;
+	else
+		temp &= ~DDI_BUF_CTL_ENABLE;
+
+ /* Enable DDI_BUF_CTL. In HDMI/DVI mode, the port width,
+ * and swing/emphasis values are ignored so nothing special needs
+ * to be done besides enabling the port.
+ */
+ I915_WRITE(DDI_BUF_CTL(port),
+ temp);
+}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 1b1cf3b3ff5..ee61ad1e642 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -24,7 +24,7 @@
* Eric Anholt <eric@anholt.net>
*/
-#include <linux/cpufreq.h>
+#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
@@ -44,7 +44,6 @@
#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
-static void intel_update_watermarks(struct drm_device *dev);
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
@@ -360,6 +359,88 @@ static const intel_limit_t intel_limits_ironlake_display_port = {
.find_pll = intel_find_pll_ironlake_dp,
};
+u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
+{
+ unsigned long flags;
+ u32 val = 0;
+
+ spin_lock_irqsave(&dev_priv->dpio_lock, flags);
+ if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
+ DRM_ERROR("DPIO idle wait timed out\n");
+ goto out_unlock;
+ }
+
+ I915_WRITE(DPIO_REG, reg);
+ I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID |
+ DPIO_BYTE);
+ if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
+ DRM_ERROR("DPIO read wait timed out\n");
+ goto out_unlock;
+ }
+ val = I915_READ(DPIO_DATA);
+
+out_unlock:
+ spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
+ return val;
+}
+
+static void vlv_init_dpio(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Reset the DPIO config */
+ I915_WRITE(DPIO_CTL, 0);
+ POSTING_READ(DPIO_CTL);
+ I915_WRITE(DPIO_CTL, 1);
+ POSTING_READ(DPIO_CTL);
+}
+
+static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
+{
+ DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
+ return 1;
+}
+
+static const struct dmi_system_id intel_dual_link_lvds[] = {
+ {
+ .callback = intel_dual_link_lvds_callback,
+ .ident = "Apple MacBook Pro (Core i5/i7 Series)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
+ },
+ },
+ { } /* terminating entry */
+};
+
+static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,
+ unsigned int reg)
+{
+ unsigned int val;
+
+ /* use the module option value if specified */
+ if (i915_lvds_channel_mode > 0)
+ return i915_lvds_channel_mode == 2;
+
+ if (dmi_check_system(intel_dual_link_lvds))
+ return true;
+
+ if (dev_priv->lvds_val)
+ val = dev_priv->lvds_val;
+ else {
+ /* BIOS should set the proper LVDS register value at boot, but
+ * in reality, it doesn't set the value when the lid is closed;
+ * we need to check "the value to be set" in VBT when LVDS
+ * register is uninitialized.
+ */
+ val = I915_READ(reg);
+ if (!(val & ~LVDS_DETECTED))
+ val = dev_priv->bios_lvds_val;
+ dev_priv->lvds_val = val;
+ }
+ return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
+}
+
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
int refclk)
{
@@ -368,8 +449,7 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
const intel_limit_t *limit;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
- LVDS_CLKB_POWER_UP) {
+ if (is_dual_link_lvds(dev_priv, PCH_LVDS)) {
/* LVDS dual channel */
if (refclk == 100000)
limit = &intel_limits_ironlake_dual_lvds_100m;
@@ -397,8 +477,7 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
const intel_limit_t *limit;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
- LVDS_CLKB_POWER_UP)
+ if (is_dual_link_lvds(dev_priv, LVDS))
/* LVDS with dual channel */
limit = &intel_limits_g4x_dual_channel_lvds;
else
@@ -536,8 +615,7 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
* reliably set up different single/dual channel state, if we
* even can.
*/
- if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
- LVDS_CLKB_POWER_UP)
+ if (is_dual_link_lvds(dev_priv, LVDS))
clock.p2 = limit->p2.p2_fast;
else
clock.p2 = limit->p2.p2_slow;
@@ -706,6 +784,17 @@ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
return true;
}
+static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 frame, frame_reg = PIPEFRAME(pipe);
+
+ frame = I915_READ(frame_reg);
+
+ if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
+ DRM_DEBUG_KMS("vblank wait timed out\n");
+}
+
/**
* intel_wait_for_vblank - wait for vblank on a given pipe
* @dev: drm device
@@ -719,6 +808,11 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
struct drm_i915_private *dev_priv = dev->dev_private;
int pipestat_reg = PIPESTAT(pipe);
+ if (INTEL_INFO(dev)->gen >= 5) {
+ ironlake_wait_for_vblank(dev, pipe);
+ return;
+ }
+
/* Clear existing vblank status. Note this will clear any other
* sticky status fields as well.
*
@@ -771,15 +865,20 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
100))
DRM_DEBUG_KMS("pipe_off wait timed out\n");
} else {
- u32 last_line;
+ u32 last_line, line_mask;
int reg = PIPEDSL(pipe);
unsigned long timeout = jiffies + msecs_to_jiffies(100);
+ if (IS_GEN2(dev))
+ line_mask = DSL_LINEMASK_GEN2;
+ else
+ line_mask = DSL_LINEMASK_GEN3;
+
/* Wait for the display line to settle */
do {
- last_line = I915_READ(reg) & DSL_LINEMASK;
+ last_line = I915_READ(reg) & line_mask;
mdelay(5);
- } while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
+ } while (((I915_READ(reg) & line_mask) != last_line) &&
time_after(timeout, jiffies));
if (time_after(jiffies, timeout))
DRM_DEBUG_KMS("pipe_off wait timed out\n");
@@ -811,26 +910,33 @@ static void assert_pll(struct drm_i915_private *dev_priv,
/* For ILK+ */
static void assert_pch_pll(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state)
+ struct intel_crtc *intel_crtc, bool state)
{
int reg;
u32 val;
bool cur_state;
+ if (HAS_PCH_LPT(dev_priv->dev)) {
+ DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
+ return;
+ }
+
+ if (!intel_crtc->pch_pll) {
+ WARN(1, "asserting PCH PLL enabled with no PLL\n");
+ return;
+ }
+
if (HAS_PCH_CPT(dev_priv->dev)) {
u32 pch_dpll;
pch_dpll = I915_READ(PCH_DPLL_SEL);
/* Make sure the selected PLL is enabled to the transcoder */
- WARN(!((pch_dpll >> (4 * pipe)) & 8),
- "transcoder %d PLL not enabled\n", pipe);
-
- /* Convert the transcoder pipe number to a pll pipe number */
- pipe = (pch_dpll >> (4 * pipe)) & 1;
+ WARN(!((pch_dpll >> (4 * intel_crtc->pipe)) & 8),
+ "transcoder %d PLL not enabled\n", intel_crtc->pipe);
}
- reg = PCH_DPLL(pipe);
+ reg = intel_crtc->pch_pll->pll_reg;
val = I915_READ(reg);
cur_state = !!(val & DPLL_VCO_ENABLE);
WARN(cur_state != state,
@@ -847,9 +953,16 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
u32 val;
bool cur_state;
- reg = FDI_TX_CTL(pipe);
- val = I915_READ(reg);
- cur_state = !!(val & FDI_TX_ENABLE);
+ if (IS_HASWELL(dev_priv->dev)) {
+ /* On Haswell, DDI is used instead of FDI_TX_CTL */
+ reg = DDI_FUNC_CTL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & PIPE_DDI_FUNC_ENABLE);
+ } else {
+ reg = FDI_TX_CTL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & FDI_TX_ENABLE);
+ }
WARN(cur_state != state,
"FDI TX state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
@@ -864,9 +977,14 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv,
u32 val;
bool cur_state;
- reg = FDI_RX_CTL(pipe);
- val = I915_READ(reg);
- cur_state = !!(val & FDI_RX_ENABLE);
+ if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
+ DRM_ERROR("Attempting to enable FDI_RX on Haswell pipe > 0\n");
+ return;
+ } else {
+ reg = FDI_RX_CTL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & FDI_RX_ENABLE);
+ }
WARN(cur_state != state,
"FDI RX state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
@@ -884,6 +1002,10 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
if (dev_priv->info->gen == 5)
return;
+ /* On Haswell, DDI ports are responsible for the FDI PLL setup */
+ if (IS_HASWELL(dev_priv->dev))
+ return;
+
reg = FDI_TX_CTL(pipe);
val = I915_READ(reg);
WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
@@ -895,6 +1017,10 @@ static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
int reg;
u32 val;
+ if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
+ DRM_ERROR("Attempting to enable FDI on Haswell with pipe > 0\n");
+ return;
+ }
reg = FDI_RX_CTL(pipe);
val = I915_READ(reg);
WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
@@ -1000,6 +1126,11 @@ static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
u32 val;
bool enabled;
+ if (HAS_PCH_LPT(dev_priv->dev)) {
+ DRM_DEBUG_DRIVER("LPT does not has PCH refclk, skipping check\n");
+ return;
+ }
+
val = I915_READ(PCH_DREF_CONTROL);
enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
DREF_SUPERSPREAD_SOURCE_MASK));
@@ -1198,6 +1329,69 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
POSTING_READ(reg);
}
+/* SBI access */
+static void
+intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->dpio_lock, flags);
+ if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_READY) == 0,
+ 100)) {
+ DRM_ERROR("timeout waiting for SBI to become ready\n");
+ goto out_unlock;
+ }
+
+ I915_WRITE(SBI_ADDR, (reg << 16));
+ I915_WRITE(SBI_DATA, value);
+ I915_WRITE(SBI_CTL_STAT, SBI_BUSY | SBI_CTL_OP_CRWR);
+
+ if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_READY | SBI_RESPONSE_SUCCESS)) == 0,
+ 100)) {
+ DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
+ goto out_unlock;
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
+}
+
+static u32
+intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
+{
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&dev_priv->dpio_lock, flags);
+ if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_READY) == 0,
+ 100)) {
+ DRM_ERROR("timeout waiting for SBI to become ready\n");
+ goto out_unlock;
+ }
+
+ I915_WRITE(SBI_ADDR, (reg << 16));
+ I915_WRITE(SBI_CTL_STAT, SBI_BUSY | SBI_CTL_OP_CRRD);
+
+ if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_READY | SBI_RESPONSE_SUCCESS)) == 0,
+ 100)) {
+ DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
+ goto out_unlock;
+ }
+
+ value = I915_READ(SBI_DATA);
+
+out_unlock:
+ spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
+ return value;
+}
+
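The two helpers above expose raw sideband reads and writes; lpt_program_iclkip() further down uses them in read-modify-write pairs. A hypothetical convenience wrapper (not part of this patch) built only on those two primitives:

/* Hypothetical helper, not in this patch: read-modify-write one SBI
 * register. Note each primitive takes and drops dpio_lock on its own,
 * so the pair is not atomic against other sideband users. */
static void intel_sbi_rmw(struct drm_i915_private *dev_priv, u16 reg,
			  u32 clear, u32 set)
{
	u32 tmp = intel_sbi_read(dev_priv, reg);

	tmp &= ~clear;
	tmp |= set;
	intel_sbi_write(dev_priv, reg, tmp);
}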
/**
* intel_enable_pch_pll - enable PCH PLL
* @dev_priv: i915 private structure
@@ -1206,60 +1400,88 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
* The PCH PLL needs to be enabled before the PCH transcoder, since it
* drives the transcoder clock.
*/
-static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+static void intel_enable_pch_pll(struct intel_crtc *intel_crtc)
{
+ struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
+ struct intel_pch_pll *pll;
int reg;
u32 val;
- if (pipe > 1)
+ /* PCH PLLs only available on ILK, SNB and IVB */
+ BUG_ON(dev_priv->info->gen < 5);
+ pll = intel_crtc->pch_pll;
+ if (pll == NULL)
return;
- /* PCH only available on ILK+ */
- BUG_ON(dev_priv->info->gen < 5);
+ if (WARN_ON(pll->refcount == 0))
+ return;
+
+ DRM_DEBUG_KMS("enable PCH PLL %x (active %d, on? %d)for crtc %d\n",
+ pll->pll_reg, pll->active, pll->on,
+ intel_crtc->base.base.id);
/* PCH refclock must be enabled first */
assert_pch_refclk_enabled(dev_priv);
- reg = PCH_DPLL(pipe);
+ if (pll->active++ && pll->on) {
+ assert_pch_pll_enabled(dev_priv, intel_crtc);
+ return;
+ }
+
+ DRM_DEBUG_KMS("enabling PCH PLL %x\n", pll->pll_reg);
+
+ reg = pll->pll_reg;
val = I915_READ(reg);
val |= DPLL_VCO_ENABLE;
I915_WRITE(reg, val);
POSTING_READ(reg);
udelay(200);
+
+ pll->on = true;
}
-static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
{
+ struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
+ struct intel_pch_pll *pll = intel_crtc->pch_pll;
int reg;
- u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL,
- pll_sel = TRANSC_DPLL_ENABLE;
-
- if (pipe > 1)
- return;
+ u32 val;
/* PCH only available on ILK+ */
BUG_ON(dev_priv->info->gen < 5);
+ if (pll == NULL)
+ return;
- /* Make sure transcoder isn't still depending on us */
- assert_transcoder_disabled(dev_priv, pipe);
+ if (WARN_ON(pll->refcount == 0))
+ return;
- if (pipe == 0)
- pll_sel |= TRANSC_DPLLA_SEL;
- else if (pipe == 1)
- pll_sel |= TRANSC_DPLLB_SEL;
+ DRM_DEBUG_KMS("disable PCH PLL %x (active %d, on? %d) for crtc %d\n",
+ pll->pll_reg, pll->active, pll->on,
+ intel_crtc->base.base.id);
+ if (WARN_ON(pll->active == 0)) {
+ assert_pch_pll_disabled(dev_priv, intel_crtc);
+ return;
+ }
- if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel)
+ if (--pll->active) {
+ assert_pch_pll_enabled(dev_priv, intel_crtc);
return;
+ }
+
+ DRM_DEBUG_KMS("disabling PCH PLL %x\n", pll->pll_reg);
- reg = PCH_DPLL(pipe);
+ /* Make sure transcoder isn't still depending on us */
+ assert_transcoder_disabled(dev_priv, intel_crtc->pipe);
+
+ reg = pll->pll_reg;
val = I915_READ(reg);
val &= ~DPLL_VCO_ENABLE;
I915_WRITE(reg, val);
POSTING_READ(reg);
udelay(200);
+
+ pll->on = false;
}
static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
@@ -1273,12 +1495,16 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
BUG_ON(dev_priv->info->gen < 5);
/* Make sure PCH DPLL is enabled */
- assert_pch_pll_enabled(dev_priv, pipe);
+ assert_pch_pll_enabled(dev_priv, to_intel_crtc(crtc));
/* FDI must be feeding us bits for PCH ports */
assert_fdi_tx_enabled(dev_priv, pipe);
assert_fdi_rx_enabled(dev_priv, pipe);
+ if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
+ DRM_ERROR("Attempting to enable transcoder on Haswell with pipe > 0\n");
+ return;
+ }
reg = TRANSCONF(pipe);
val = I915_READ(reg);
pipeconf_val = I915_READ(PIPECONF(pipe));
@@ -1415,7 +1641,7 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
* Plane regs are double buffered, going from enabled->disabled needs a
* trigger in order to latch. The display address reg provides this.
*/
-static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
+void intel_flush_display_plane(struct drm_i915_private *dev_priv,
enum plane plane)
{
I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
@@ -1526,490 +1752,6 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
disable_pch_hdmi(dev_priv, pipe, HDMID);
}
-static void i8xx_disable_fbc(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 fbc_ctl;
-
- /* Disable compression */
- fbc_ctl = I915_READ(FBC_CONTROL);
- if ((fbc_ctl & FBC_CTL_EN) == 0)
- return;
-
- fbc_ctl &= ~FBC_CTL_EN;
- I915_WRITE(FBC_CONTROL, fbc_ctl);
-
- /* Wait for compressing bit to clear */
- if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
- DRM_DEBUG_KMS("FBC idle timed out\n");
- return;
- }
-
- DRM_DEBUG_KMS("disabled FBC\n");
-}
-
-static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->fb;
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int cfb_pitch;
- int plane, i;
- u32 fbc_ctl, fbc_ctl2;
-
- cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
- if (fb->pitches[0] < cfb_pitch)
- cfb_pitch = fb->pitches[0];
-
- /* FBC_CTL wants 64B units */
- cfb_pitch = (cfb_pitch / 64) - 1;
- plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
-
- /* Clear old tags */
- for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
- I915_WRITE(FBC_TAG + (i * 4), 0);
-
- /* Set it up... */
- fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
- fbc_ctl2 |= plane;
- I915_WRITE(FBC_CONTROL2, fbc_ctl2);
- I915_WRITE(FBC_FENCE_OFF, crtc->y);
-
- /* enable it... */
- fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
- if (IS_I945GM(dev))
- fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
- fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
- fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
- fbc_ctl |= obj->fence_reg;
- I915_WRITE(FBC_CONTROL, fbc_ctl);
-
- DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
- cfb_pitch, crtc->y, intel_crtc->plane);
-}
-
-static bool i8xx_fbc_enabled(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
-}
-
-static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->fb;
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
- unsigned long stall_watermark = 200;
- u32 dpfc_ctl;
-
- dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
- dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
- I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
-
- I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
- (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
- (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
- I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
-
- /* enable it... */
- I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
-
- DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
-}
-
-static void g4x_disable_fbc(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dpfc_ctl;
-
- /* Disable compression */
- dpfc_ctl = I915_READ(DPFC_CONTROL);
- if (dpfc_ctl & DPFC_CTL_EN) {
- dpfc_ctl &= ~DPFC_CTL_EN;
- I915_WRITE(DPFC_CONTROL, dpfc_ctl);
-
- DRM_DEBUG_KMS("disabled FBC\n");
- }
-}
-
-static bool g4x_fbc_enabled(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
-}
-
-static void sandybridge_blit_fbc_update(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 blt_ecoskpd;
-
- /* Make sure blitter notifies FBC of writes */
- gen6_gt_force_wake_get(dev_priv);
- blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
- blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
- GEN6_BLITTER_LOCK_SHIFT;
- I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
- blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
- I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
- blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
- GEN6_BLITTER_LOCK_SHIFT);
- I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
- POSTING_READ(GEN6_BLITTER_ECOSKPD);
- gen6_gt_force_wake_put(dev_priv);
-}
-
-static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->fb;
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
- unsigned long stall_watermark = 200;
- u32 dpfc_ctl;
-
- dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
- dpfc_ctl &= DPFC_RESERVED;
- dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
- /* Set persistent mode for front-buffer rendering, ala X. */
- dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
- dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
- I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
-
- I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
- (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
- (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
- I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
- I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
- /* enable it... */
- I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
-
- if (IS_GEN6(dev)) {
- I915_WRITE(SNB_DPFC_CTL_SA,
- SNB_CPU_FENCE_ENABLE | obj->fence_reg);
- I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
- sandybridge_blit_fbc_update(dev);
- }
-
- DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
-}
-
-static void ironlake_disable_fbc(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dpfc_ctl;
-
- /* Disable compression */
- dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
- if (dpfc_ctl & DPFC_CTL_EN) {
- dpfc_ctl &= ~DPFC_CTL_EN;
- I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
-
- DRM_DEBUG_KMS("disabled FBC\n");
- }
-}
-
-static bool ironlake_fbc_enabled(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
-}
-
-bool intel_fbc_enabled(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!dev_priv->display.fbc_enabled)
- return false;
-
- return dev_priv->display.fbc_enabled(dev);
-}
-
-static void intel_fbc_work_fn(struct work_struct *__work)
-{
- struct intel_fbc_work *work =
- container_of(to_delayed_work(__work),
- struct intel_fbc_work, work);
- struct drm_device *dev = work->crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- mutex_lock(&dev->struct_mutex);
- if (work == dev_priv->fbc_work) {
- /* Double check that we haven't switched fb without cancelling
- * the prior work.
- */
- if (work->crtc->fb == work->fb) {
- dev_priv->display.enable_fbc(work->crtc,
- work->interval);
-
- dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
- dev_priv->cfb_fb = work->crtc->fb->base.id;
- dev_priv->cfb_y = work->crtc->y;
- }
-
- dev_priv->fbc_work = NULL;
- }
- mutex_unlock(&dev->struct_mutex);
-
- kfree(work);
-}
-
-static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
-{
- if (dev_priv->fbc_work == NULL)
- return;
-
- DRM_DEBUG_KMS("cancelling pending FBC enable\n");
-
- /* Synchronisation is provided by struct_mutex and checking of
- * dev_priv->fbc_work, so we can perform the cancellation
- * entirely asynchronously.
- */
- if (cancel_delayed_work(&dev_priv->fbc_work->work))
- /* tasklet was killed before being run, clean up */
- kfree(dev_priv->fbc_work);
-
- /* Mark the work as no longer wanted so that if it does
- * wake-up (because the work was already running and waiting
- * for our mutex), it will discover that is no longer
- * necessary to run.
- */
- dev_priv->fbc_work = NULL;
-}
-
-static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
-{
- struct intel_fbc_work *work;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!dev_priv->display.enable_fbc)
- return;
-
- intel_cancel_fbc_work(dev_priv);
-
- work = kzalloc(sizeof *work, GFP_KERNEL);
- if (work == NULL) {
- dev_priv->display.enable_fbc(crtc, interval);
- return;
- }
-
- work->crtc = crtc;
- work->fb = crtc->fb;
- work->interval = interval;
- INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
-
- dev_priv->fbc_work = work;
-
- DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
-
- /* Delay the actual enabling to let pageflipping cease and the
- * display to settle before starting the compression. Note that
- * this delay also serves a second purpose: it allows for a
- * vblank to pass after disabling the FBC before we attempt
- * to modify the control registers.
- *
- * A more complicated solution would involve tracking vblanks
- * following the termination of the page-flipping sequence
- * and indeed performing the enable as a co-routine and not
- * waiting synchronously upon the vblank.
- */
- schedule_delayed_work(&work->work, msecs_to_jiffies(50));
-}
-
-void intel_disable_fbc(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- intel_cancel_fbc_work(dev_priv);
-
- if (!dev_priv->display.disable_fbc)
- return;
-
- dev_priv->display.disable_fbc(dev);
- dev_priv->cfb_plane = -1;
-}
-
-/**
- * intel_update_fbc - enable/disable FBC as needed
- * @dev: the drm_device
- *
- * Set up the framebuffer compression hardware at mode set time. We
- * enable it if possible:
- * - plane A only (on pre-965)
- * - no pixel mulitply/line duplication
- * - no alpha buffer discard
- * - no dual wide
- * - framebuffer <= 2048 in width, 1536 in height
- *
- * We can't assume that any compression will take place (worst case),
- * so the compressed buffer has to be the same size as the uncompressed
- * one. It also must reside (along with the line length buffer) in
- * stolen memory.
- *
- * We need to enable/disable FBC on a global basis.
- */
-static void intel_update_fbc(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = NULL, *tmp_crtc;
- struct intel_crtc *intel_crtc;
- struct drm_framebuffer *fb;
- struct intel_framebuffer *intel_fb;
- struct drm_i915_gem_object *obj;
- int enable_fbc;
-
- DRM_DEBUG_KMS("\n");
-
- if (!i915_powersave)
- return;
-
- if (!I915_HAS_FBC(dev))
- return;
-
- /*
- * If FBC is already on, we just have to verify that we can
- * keep it that way...
- * Need to disable if:
- * - more than one pipe is active
- * - changing FBC params (stride, fence, mode)
- * - new fb is too large to fit in compressed buffer
- * - going to an unsupported config (interlace, pixel multiply, etc.)
- */
- list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
- if (tmp_crtc->enabled && tmp_crtc->fb) {
- if (crtc) {
- DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
- dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
- goto out_disable;
- }
- crtc = tmp_crtc;
- }
- }
-
- if (!crtc || crtc->fb == NULL) {
- DRM_DEBUG_KMS("no output, disabling\n");
- dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
- goto out_disable;
- }
-
- intel_crtc = to_intel_crtc(crtc);
- fb = crtc->fb;
- intel_fb = to_intel_framebuffer(fb);
- obj = intel_fb->obj;
-
- enable_fbc = i915_enable_fbc;
- if (enable_fbc < 0) {
- DRM_DEBUG_KMS("fbc set to per-chip default\n");
- enable_fbc = 1;
- if (INTEL_INFO(dev)->gen <= 6)
- enable_fbc = 0;
- }
- if (!enable_fbc) {
- DRM_DEBUG_KMS("fbc disabled per module param\n");
- dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
- goto out_disable;
- }
- if (intel_fb->obj->base.size > dev_priv->cfb_size) {
- DRM_DEBUG_KMS("framebuffer too large, disabling "
- "compression\n");
- dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
- goto out_disable;
- }
- if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
- (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
- DRM_DEBUG_KMS("mode incompatible with compression, "
- "disabling\n");
- dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
- goto out_disable;
- }
- if ((crtc->mode.hdisplay > 2048) ||
- (crtc->mode.vdisplay > 1536)) {
- DRM_DEBUG_KMS("mode too large for compression, disabling\n");
- dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
- goto out_disable;
- }
- if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
- DRM_DEBUG_KMS("plane not 0, disabling compression\n");
- dev_priv->no_fbc_reason = FBC_BAD_PLANE;
- goto out_disable;
- }
-
- /* The use of a CPU fence is mandatory in order to detect writes
- * by the CPU to the scanout and trigger updates to the FBC.
- */
- if (obj->tiling_mode != I915_TILING_X ||
- obj->fence_reg == I915_FENCE_REG_NONE) {
- DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
- dev_priv->no_fbc_reason = FBC_NOT_TILED;
- goto out_disable;
- }
-
- /* If the kernel debugger is active, always disable compression */
- if (in_dbg_master())
- goto out_disable;
-
- /* If the scanout has not changed, don't modify the FBC settings.
- * Note that we make the fundamental assumption that the fb->obj
- * cannot be unpinned (and have its GTT offset and fence revoked)
- * without first being decoupled from the scanout and FBC disabled.
- */
- if (dev_priv->cfb_plane == intel_crtc->plane &&
- dev_priv->cfb_fb == fb->base.id &&
- dev_priv->cfb_y == crtc->y)
- return;
-
- if (intel_fbc_enabled(dev)) {
- /* We update FBC along two paths, after changing fb/crtc
- * configuration (modeswitching) and after page-flipping
- * finishes. For the latter, we know that not only did
- * we disable the FBC at the start of the page-flip
- * sequence, but also more than one vblank has passed.
- *
- * For the former case of modeswitching, it is possible
- * to switch between two FBC valid configurations
- * instantaneously so we do need to disable the FBC
- * before we can modify its control registers. We also
- * have to wait for the next vblank for that to take
- * effect. However, since we delay enabling FBC we can
- * assume that a vblank has passed since disabling and
- * that we can safely alter the registers in the deferred
- * callback.
- *
- * In the scenario that we go from a valid to invalid
- * and then back to valid FBC configuration we have
- * no strict enforcement that a vblank occurred since
- * disabling the FBC. However, along all current pipe
- * disabling paths we do need to wait for a vblank at
- * some point. And we wait before enabling FBC anyway.
- */
- DRM_DEBUG_KMS("disabling active FBC for update\n");
- intel_disable_fbc(dev);
- }
-
- intel_enable_fbc(crtc, 500);
- return;
-
-out_disable:
- /* Multiple disables should be harmless */
- if (intel_fbc_enabled(dev)) {
- DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
- intel_disable_fbc(dev);
- }
-}
-
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
@@ -2050,13 +1792,11 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
* framebuffer compression. For simplicity, we always install
* a fence as the cost is not that onerous.
*/
- if (obj->tiling_mode != I915_TILING_NONE) {
- ret = i915_gem_object_get_fence(obj, pipelined);
- if (ret)
- goto err_unpin;
+ ret = i915_gem_object_get_fence(obj);
+ if (ret)
+ goto err_unpin;
- i915_gem_object_pin_fence(obj);
- }
+ i915_gem_object_pin_fence(obj);
dev_priv->mm.interruptible = true;
return 0;
@@ -2137,7 +1877,7 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
Start, Offset, x, y, fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
if (INTEL_INFO(dev)->gen >= 4) {
- I915_WRITE(DSPSURF(plane), Start);
+ I915_MODIFY_DISPBASE(DSPSURF(plane), Start);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPADDR(plane), Offset);
} else
@@ -2217,7 +1957,7 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
Start, Offset, x, y, fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
- I915_WRITE(DSPSURF(plane), Start);
+ I915_MODIFY_DISPBASE(DSPSURF(plane), Start);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPADDR(plane), Offset);
POSTING_READ(reg);
@@ -2232,16 +1972,12 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
-
- ret = dev_priv->display.update_plane(crtc, fb, x, y);
- if (ret)
- return ret;
- intel_update_fbc(dev);
+ if (dev_priv->display.disable_fbc)
+ dev_priv->display.disable_fbc(dev);
intel_increase_pllclock(crtc);
- return 0;
+ return dev_priv->display.update_plane(crtc, fb, x, y);
}
static int
@@ -2276,6 +2012,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int ret;
@@ -2286,16 +2023,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
}
- switch (intel_crtc->plane) {
- case 0:
- case 1:
- break;
- case 2:
- if (IS_IVYBRIDGE(dev))
- break;
- /* fall through otherwise */
- default:
- DRM_ERROR("no plane for crtc\n");
+ if (intel_crtc->plane > dev_priv->num_pipe) {
+ DRM_ERROR("no plane for crtc: plane %d, num_pipes %d\n",
+ intel_crtc->plane,
+ dev_priv->num_pipe);
return -EINVAL;
}
@@ -2312,8 +2043,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
if (old_fb)
intel_finish_fb(old_fb);
- ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
- LEAVE_ATOMIC_MODE_SET);
+ ret = dev_priv->display.update_plane(crtc, crtc->fb, x, y);
if (ret) {
intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
mutex_unlock(&dev->struct_mutex);
@@ -2326,6 +2056,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
}
+ intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
if (!dev->primary->master)
@@ -2547,7 +2278,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- u32 reg, temp, i;
+ u32 reg, temp, i, retry;
/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
@@ -2599,15 +2330,19 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(500);
- reg = FDI_RX_IIR(pipe);
- temp = I915_READ(reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
-
- if (temp & FDI_RX_BIT_LOCK) {
- I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
- DRM_DEBUG_KMS("FDI train 1 done.\n");
- break;
+ for (retry = 0; retry < 5; retry++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = I915_READ(reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ if (temp & FDI_RX_BIT_LOCK) {
+ I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
+ DRM_DEBUG_KMS("FDI train 1 done.\n");
+ break;
+ }
+ udelay(50);
}
+ if (retry < 5)
+ break;
}
if (i == 4)
DRM_ERROR("FDI train 1 fail!\n");
@@ -2648,15 +2383,19 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(500);
- reg = FDI_RX_IIR(pipe);
- temp = I915_READ(reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
-
- if (temp & FDI_RX_SYMBOL_LOCK) {
- I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
- DRM_DEBUG_KMS("FDI train 2 done.\n");
- break;
+ for (retry = 0; retry < 5; retry++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = I915_READ(reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ if (temp & FDI_RX_SYMBOL_LOCK) {
+ I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
+ DRM_DEBUG_KMS("FDI train 2 done.\n");
+ break;
+ }
+ udelay(50);
}
+ if (retry < 5)
+ break;
}
if (i == 4)
DRM_ERROR("FDI train 2 fail!\n");
@@ -2808,14 +2547,18 @@ static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(200);
- /* Enable CPU FDI TX PLL, always on for Ironlake */
- reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
- if ((temp & FDI_TX_PLL_ENABLE) == 0) {
- I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
+ /* On Haswell, the PLL configuration for ports and pipes is handled
+ * separately, as part of DDI setup */
+ if (!IS_HASWELL(dev)) {
+ /* Enable CPU FDI TX PLL, always on for Ironlake */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ if ((temp & FDI_TX_PLL_ENABLE) == 0) {
+ I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
- POSTING_READ(reg);
- udelay(100);
+ POSTING_READ(reg);
+ udelay(100);
+ }
}
}
@@ -2888,38 +2631,16 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
udelay(100);
}
-/*
- * When we disable a pipe, we need to clear any pending scanline wait events
- * to avoid hanging the ring, which we assume we are waiting on.
- */
-static void intel_clear_scanline_wait(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
- u32 tmp;
-
- if (IS_GEN2(dev))
- /* Can't break the hang on i8xx */
- return;
-
- ring = LP_RING(dev_priv);
- tmp = I915_READ_CTL(ring);
- if (tmp & RING_WAIT)
- I915_WRITE_CTL(ring, tmp);
-}
-
static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
- struct drm_i915_gem_object *obj;
- struct drm_i915_private *dev_priv;
+ struct drm_device *dev = crtc->dev;
if (crtc->fb == NULL)
return;
- obj = to_intel_framebuffer(crtc->fb)->obj;
- dev_priv = crtc->dev->dev_private;
- wait_event(dev_priv->pending_flip_queue,
- atomic_read(&obj->pending_flip) == 0);
+ mutex_lock(&dev->struct_mutex);
+ intel_finish_fb(crtc->fb);
+ mutex_unlock(&dev->struct_mutex);
}
static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
@@ -2936,6 +2657,22 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
if (encoder->base.crtc != crtc)
continue;
+ /* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell
+ * CPU handles all others */
+ if (IS_HASWELL(dev)) {
+ /* It is still unclear how this will work on PPT, so throw up a warning */
+ WARN_ON(!HAS_PCH_LPT(dev));
+
+ if (encoder->type == DRM_MODE_ENCODER_DAC) {
+ DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n");
+ return true;
+ } else {
+ DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n",
+ encoder->type);
+ return false;
+ }
+ }
+
switch (encoder->type) {
case INTEL_OUTPUT_EDP:
if (!intel_encoder_is_pch_edp(&encoder->base))
@@ -2947,6 +2684,97 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
return true;
}
+/* Program iCLKIP clock to the desired frequency */
+static void lpt_program_iclkip(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 divsel, phaseinc, auxdiv, phasedir = 0;
+ u32 temp;
+
+ /* Gate the pixclk before programming the divisors, and ungate it
+ * again when done.
+ */
+ I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
+
+ /* Disable SSCCTL */
+ intel_sbi_write(dev_priv, SBI_SSCCTL6,
+ intel_sbi_read(dev_priv, SBI_SSCCTL6) |
+ SBI_SSCCTL_DISABLE);
+
+ /* 20MHz is a corner case which is out of range for the 7-bit divisor */
+ if (crtc->mode.clock == 20000) {
+ auxdiv = 1;
+ divsel = 0x41;
+ phaseinc = 0x20;
+ } else {
+ /* The iCLK virtual clock root frequency is in MHz,
+ * but the crtc->mode.clock is in KHz. To get the divisors,
+ * it is necessary to divide one by another, so we
+ * convert the virtual clock precision to KHz here for higher
+ * precision.
+ */
+ u32 iclk_virtual_root_freq = 172800 * 1000;
+ u32 iclk_pi_range = 64;
+ u32 desired_divisor, msb_divisor_value, pi_value;
+
+ desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock);
+ msb_divisor_value = desired_divisor / iclk_pi_range;
+ pi_value = desired_divisor % iclk_pi_range;
+
+ auxdiv = 0;
+ divsel = msb_divisor_value - 2;
+ phaseinc = pi_value;
+ }
+
+ /* This should not happen with any sane values */
+ WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
+ ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
+ WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
+ ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
+
+ DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
+ crtc->mode.clock,
+ auxdiv,
+ divsel,
+ phasedir,
+ phaseinc);
+
+ /* Program SSCDIVINTPHASE6 */
+ temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6);
+ temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
+ temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
+ temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
+ temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
+ temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
+ temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
+
+ intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp);
+
+ /* Program SSCAUXDIV */
+ temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6);
+ temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
+ temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
+ intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp);
+
+ /* Enable modulator and associated divider */
+ temp = intel_sbi_read(dev_priv, SBI_SSCCTL6);
+ temp &= ~SBI_SSCCTL_DISABLE;
+ intel_sbi_write(dev_priv, SBI_SSCCTL6, temp);
+
+ /* Wait for initialization time */
+ udelay(24);
+
+ I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
+}
+
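The divisor arithmetic in lpt_program_iclkip() is easy to check by hand: divide the 172.8 GHz virtual root (expressed in KHz) by the pixel clock, then split the result into a 7-bit integer divisor (minus the implied 2) and a 64-step phase-interpolator remainder. A standalone sketch of just that math (ordinary user-space C, not driver code; the sample clocks are illustrative):

#include <stdio.h>

int main(void)
{
	const unsigned int iclk_virtual_root_freq = 172800 * 1000; /* KHz */
	const unsigned int iclk_pi_range = 64;
	const unsigned int clocks[] = { 148500, 108000 }; /* KHz pixel clocks */
	unsigned int i;

	for (i = 0; i < 2; i++) {
		unsigned int desired = iclk_virtual_root_freq / clocks[i];
		unsigned int divsel = desired / iclk_pi_range - 2;
		unsigned int phaseinc = desired % iclk_pi_range;

		/* 148500 KHz -> divsel=16 phaseinc=11; 108000 KHz -> 23, 0 */
		printf("%u KHz: divsel=%u phaseinc=%u\n",
		       clocks[i], divsel, phaseinc);
	}
	return 0;
}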
/*
* Enable PCH resources required for PCH ports:
* - PCH PLLs
@@ -2961,29 +2789,41 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- u32 reg, temp, transc_sel;
+ u32 reg, temp;
+
+ assert_transcoder_disabled(dev_priv, pipe);
/* For PCH output, training FDI link */
dev_priv->display.fdi_link_train(crtc);
- intel_enable_pch_pll(dev_priv, pipe);
+ intel_enable_pch_pll(intel_crtc);
- if (HAS_PCH_CPT(dev)) {
- transc_sel = intel_crtc->use_pll_a ? TRANSC_DPLLA_SEL :
- TRANSC_DPLLB_SEL;
+ if (HAS_PCH_LPT(dev)) {
+ DRM_DEBUG_KMS("LPT detected: programming iCLKIP\n");
+ lpt_program_iclkip(crtc);
+ } else if (HAS_PCH_CPT(dev)) {
+ u32 sel;
- /* Be sure PCH DPLL SEL is set */
temp = I915_READ(PCH_DPLL_SEL);
- if (pipe == 0) {
- temp &= ~(TRANSA_DPLLB_SEL);
- temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
- } else if (pipe == 1) {
- temp &= ~(TRANSB_DPLLB_SEL);
- temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
- } else if (pipe == 2) {
- temp &= ~(TRANSC_DPLLB_SEL);
- temp |= (TRANSC_DPLL_ENABLE | transc_sel);
+ switch (pipe) {
+ default:
+ case 0:
+ temp |= TRANSA_DPLL_ENABLE;
+ sel = TRANSA_DPLLB_SEL;
+ break;
+ case 1:
+ temp |= TRANSB_DPLL_ENABLE;
+ sel = TRANSB_DPLLB_SEL;
+ break;
+ case 2:
+ temp |= TRANSC_DPLL_ENABLE;
+ sel = TRANSC_DPLLB_SEL;
+ break;
}
+ if (intel_crtc->pch_pll->pll_reg == _PCH_DPLL_B)
+ temp |= sel;
+ else
+ temp &= ~sel;
I915_WRITE(PCH_DPLL_SEL, temp);
}
@@ -2998,7 +2838,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe)));
- intel_fdi_normal_train(crtc);
+ if (!IS_HASWELL(dev))
+ intel_fdi_normal_train(crtc);
/* For PCH DP, enable TRANS_DP_CTL */
if (HAS_PCH_CPT(dev) &&
@@ -3041,6 +2882,93 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
intel_enable_transcoder(dev_priv, pipe);
}
+static void intel_put_pch_pll(struct intel_crtc *intel_crtc)
+{
+ struct intel_pch_pll *pll = intel_crtc->pch_pll;
+
+ if (pll == NULL)
+ return;
+
+ if (pll->refcount == 0) {
+ WARN(1, "bad PCH PLL refcount\n");
+ return;
+ }
+
+ --pll->refcount;
+ intel_crtc->pch_pll = NULL;
+}
+
+static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u32 dpll, u32 fp)
+{
+ struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
+ struct intel_pch_pll *pll;
+ int i;
+
+ pll = intel_crtc->pch_pll;
+ if (pll) {
+ DRM_DEBUG_KMS("CRTC:%d reusing existing PCH PLL %x\n",
+ intel_crtc->base.base.id, pll->pll_reg);
+ goto prepare;
+ }
+
+ if (HAS_PCH_IBX(dev_priv->dev)) {
+ /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
+ i = intel_crtc->pipe;
+ pll = &dev_priv->pch_plls[i];
+
+ DRM_DEBUG_KMS("CRTC:%d using pre-allocated PCH PLL %x\n",
+ intel_crtc->base.base.id, pll->pll_reg);
+
+ goto found;
+ }
+
+ for (i = 0; i < dev_priv->num_pch_pll; i++) {
+ pll = &dev_priv->pch_plls[i];
+
+ /* Only want to check enabled timings first */
+ if (pll->refcount == 0)
+ continue;
+
+ if (dpll == (I915_READ(pll->pll_reg) & 0x7fffffff) &&
+ fp == I915_READ(pll->fp0_reg)) {
+ DRM_DEBUG_KMS("CRTC:%d sharing existing PCH PLL %x (refcount %d, ative %d)\n",
+ intel_crtc->base.base.id,
+ pll->pll_reg, pll->refcount, pll->active);
+
+ goto found;
+ }
+ }
+
+ /* Ok no matching timings, maybe there's a free one? */
+ for (i = 0; i < dev_priv->num_pch_pll; i++) {
+ pll = &dev_priv->pch_plls[i];
+ if (pll->refcount == 0) {
+ DRM_DEBUG_KMS("CRTC:%d allocated PCH PLL %x\n",
+ intel_crtc->base.base.id, pll->pll_reg);
+ goto found;
+ }
+ }
+
+ return NULL;
+
+found:
+ intel_crtc->pch_pll = pll;
+ pll->refcount++;
+ DRM_DEBUG_DRIVER("using pll %d for pipe %d\n", i, intel_crtc->pipe);
+prepare: /* separate function? */
+ DRM_DEBUG_DRIVER("switching PLL %x off\n", pll->pll_reg);
+
+ /* Wait for the clocks to stabilize before rewriting the regs */
+ I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
+ POSTING_READ(pll->pll_reg);
+ udelay(150);
+
+ I915_WRITE(pll->fp0_reg, fp);
+ I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
+ pll->on = false;
+ return pll;
+}
+
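Together with intel_enable_pch_pll()/intel_disable_pch_pll() earlier in the patch, the shared-PLL API has a simple contract: refcount counts CRTCs that hold the PLL, active counts CRTCs that currently have it enabled. An illustrative balanced caller (not from this patch; the real call sites are the ironlake mode-set and disable paths), where dpll and fp are the precomputed register values:

/* Illustrative sketch of balanced use of the shared PCH PLL API. */
static int example_crtc_enable(struct intel_crtc *intel_crtc, u32 dpll, u32 fp)
{
	if (intel_get_pch_pll(intel_crtc, dpll, fp) == NULL)
		return -EINVAL;	/* every PLL busy with other timings */

	intel_enable_pch_pll(intel_crtc);	/* ++active, power up VCO */
	/* ... enable transcoder, train FDI, etc ... */
	return 0;
}

static void example_crtc_disable(struct intel_crtc *intel_crtc)
{
	intel_disable_pch_pll(intel_crtc);	/* --active, may power down */
	intel_put_pch_pll(intel_crtc);		/* --refcount, drop the PLL */
}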
void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3185,8 +3113,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
}
/* disable PCH DPLL */
- if (!intel_crtc->no_pll)
- intel_disable_pch_pll(dev_priv, pipe);
+ intel_disable_pch_pll(intel_crtc);
/* Switch from PCDclk to Rawclk */
reg = FDI_RX_CTL(pipe);
@@ -3214,7 +3141,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
- intel_clear_scanline_wait(dev);
mutex_unlock(&dev->struct_mutex);
}
@@ -3242,6 +3168,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
+static void ironlake_crtc_off(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ intel_put_pch_pll(intel_crtc);
+}
+
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
{
if (!enable && intel_crtc->overlay) {
@@ -3313,7 +3245,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
intel_crtc->active = false;
intel_update_fbc(dev);
intel_update_watermarks(dev);
- intel_clear_scanline_wait(dev);
}
static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -3333,6 +3264,10 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
+static void i9xx_crtc_off(struct drm_crtc *crtc)
+{
+}
+
/**
* Sets the power management mode of the pipe and plane.
*/
@@ -3380,25 +3315,11 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
{
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
struct drm_device *dev = crtc->dev;
-
- /* Flush any pending WAITs before we disable the pipe. Note that
- * we need to drop the struct_mutex in order to acquire it again
- * during the lowlevel dpms routines around a couple of the
- * operations. It does not look trivial nor desirable to move
- * that locking higher. So instead we leave a window for the
- * submission of further commands on the fb before we can actually
- * disable it. This race with userspace exists anyway, and we can
- * only rely on the pipe being disabled by userspace after it
- * receives the hotplug notification and has flushed any pending
- * batches.
- */
- if (crtc->fb) {
- mutex_lock(&dev->struct_mutex);
- intel_finish_fb(crtc->fb);
- mutex_unlock(&dev->struct_mutex);
- }
+ struct drm_i915_private *dev_priv = dev->dev_private;
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+ dev_priv->display.off(crtc);
+
assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
@@ -3448,8 +3369,7 @@ void intel_encoder_commit(struct drm_encoder *encoder)
{
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
struct drm_device *dev = encoder->dev;
- struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
- struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
/* lvds has its own version of commit see intel_lvds_commit */
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
@@ -3487,6 +3407,11 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
return true;
}
+static int valleyview_get_display_clock_speed(struct drm_device *dev)
+{
+ return 400000; /* FIXME */
+}
+
static int i945_get_display_clock_speed(struct drm_device *dev)
{
return 400000;
@@ -3584,1342 +3509,6 @@ ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
-
-struct intel_watermark_params {
- unsigned long fifo_size;
- unsigned long max_wm;
- unsigned long default_wm;
- unsigned long guard_size;
- unsigned long cacheline_size;
-};
-
-/* Pineview has different values for various configs */
-static const struct intel_watermark_params pineview_display_wm = {
- PINEVIEW_DISPLAY_FIFO,
- PINEVIEW_MAX_WM,
- PINEVIEW_DFT_WM,
- PINEVIEW_GUARD_WM,
- PINEVIEW_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params pineview_display_hplloff_wm = {
- PINEVIEW_DISPLAY_FIFO,
- PINEVIEW_MAX_WM,
- PINEVIEW_DFT_HPLLOFF_WM,
- PINEVIEW_GUARD_WM,
- PINEVIEW_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params pineview_cursor_wm = {
- PINEVIEW_CURSOR_FIFO,
- PINEVIEW_CURSOR_MAX_WM,
- PINEVIEW_CURSOR_DFT_WM,
- PINEVIEW_CURSOR_GUARD_WM,
- PINEVIEW_FIFO_LINE_SIZE,
-};
-static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
- PINEVIEW_CURSOR_FIFO,
- PINEVIEW_CURSOR_MAX_WM,
- PINEVIEW_CURSOR_DFT_WM,
- PINEVIEW_CURSOR_GUARD_WM,
- PINEVIEW_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params g4x_wm_info = {
- G4X_FIFO_SIZE,
- G4X_MAX_WM,
- G4X_MAX_WM,
- 2,
- G4X_FIFO_LINE_SIZE,
-};
-static const struct intel_watermark_params g4x_cursor_wm_info = {
- I965_CURSOR_FIFO,
- I965_CURSOR_MAX_WM,
- I965_CURSOR_DFT_WM,
- 2,
- G4X_FIFO_LINE_SIZE,
-};
-static const struct intel_watermark_params i965_cursor_wm_info = {
- I965_CURSOR_FIFO,
- I965_CURSOR_MAX_WM,
- I965_CURSOR_DFT_WM,
- 2,
- I915_FIFO_LINE_SIZE,
-};
-static const struct intel_watermark_params i945_wm_info = {
- I945_FIFO_SIZE,
- I915_MAX_WM,
- 1,
- 2,
- I915_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params i915_wm_info = {
- I915_FIFO_SIZE,
- I915_MAX_WM,
- 1,
- 2,
- I915_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params i855_wm_info = {
- I855GM_FIFO_SIZE,
- I915_MAX_WM,
- 1,
- 2,
- I830_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params i830_wm_info = {
- I830_FIFO_SIZE,
- I915_MAX_WM,
- 1,
- 2,
- I830_FIFO_LINE_SIZE
-};
-
-static const struct intel_watermark_params ironlake_display_wm_info = {
- ILK_DISPLAY_FIFO,
- ILK_DISPLAY_MAXWM,
- ILK_DISPLAY_DFTWM,
- 2,
- ILK_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params ironlake_cursor_wm_info = {
- ILK_CURSOR_FIFO,
- ILK_CURSOR_MAXWM,
- ILK_CURSOR_DFTWM,
- 2,
- ILK_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params ironlake_display_srwm_info = {
- ILK_DISPLAY_SR_FIFO,
- ILK_DISPLAY_MAX_SRWM,
- ILK_DISPLAY_DFT_SRWM,
- 2,
- ILK_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params ironlake_cursor_srwm_info = {
- ILK_CURSOR_SR_FIFO,
- ILK_CURSOR_MAX_SRWM,
- ILK_CURSOR_DFT_SRWM,
- 2,
- ILK_FIFO_LINE_SIZE
-};
-
-static const struct intel_watermark_params sandybridge_display_wm_info = {
- SNB_DISPLAY_FIFO,
- SNB_DISPLAY_MAXWM,
- SNB_DISPLAY_DFTWM,
- 2,
- SNB_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params sandybridge_cursor_wm_info = {
- SNB_CURSOR_FIFO,
- SNB_CURSOR_MAXWM,
- SNB_CURSOR_DFTWM,
- 2,
- SNB_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params sandybridge_display_srwm_info = {
- SNB_DISPLAY_SR_FIFO,
- SNB_DISPLAY_MAX_SRWM,
- SNB_DISPLAY_DFT_SRWM,
- 2,
- SNB_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
- SNB_CURSOR_SR_FIFO,
- SNB_CURSOR_MAX_SRWM,
- SNB_CURSOR_DFT_SRWM,
- 2,
- SNB_FIFO_LINE_SIZE
-};
-
-
-/**
- * intel_calculate_wm - calculate watermark level
- * @clock_in_khz: pixel clock
- * @wm: chip FIFO params
- * @pixel_size: display pixel size
- * @latency_ns: memory latency for the platform
- *
- * Calculate the watermark level (the level at which the display plane will
- * start fetching from memory again). Each chip has a different display
- * FIFO size and allocation, so the caller needs to figure that out and pass
- * in the correct intel_watermark_params structure.
- *
- * As the pixel clock runs, the FIFO will be drained at a rate that depends
- * on the pixel size. When it reaches the watermark level, it'll start
- * fetching FIFO line sized based chunks from memory until the FIFO fills
- * past the watermark point. If the FIFO drains completely, a FIFO underrun
- * will occur, and a display engine hang could result.
- */
-static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
- const struct intel_watermark_params *wm,
- int fifo_size,
- int pixel_size,
- unsigned long latency_ns)
-{
- long entries_required, wm_size;
-
- /*
- * Note: we need to make sure we don't overflow for various clock &
- * latency values.
- * clocks go from a few thousand to several hundred thousand.
- * latency is usually a few thousand
- */
- entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
- 1000;
- entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
-
- DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
-
- wm_size = fifo_size - (entries_required + wm->guard_size);
-
- DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
-
- /* Don't promote wm_size to unsigned... */
- if (wm_size > (long)wm->max_wm)
- wm_size = wm->max_wm;
- if (wm_size <= 0)
- wm_size = wm->default_wm;
- return wm_size;
-}
-
-struct cxsr_latency {
- int is_desktop;
- int is_ddr3;
- unsigned long fsb_freq;
- unsigned long mem_freq;
- unsigned long display_sr;
- unsigned long display_hpll_disable;
- unsigned long cursor_sr;
- unsigned long cursor_hpll_disable;
-};
-
-static const struct cxsr_latency cxsr_latency_table[] = {
- {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
- {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
- {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
- {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
- {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
-
- {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
- {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
- {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
- {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
- {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
-
- {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
- {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
- {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
- {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
- {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
-
- {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
- {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
- {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
- {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
- {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
-
- {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
- {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
- {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
- {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
- {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
-
- {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
- {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
- {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
- {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
- {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
-};
-
-static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
- int is_ddr3,
- int fsb,
- int mem)
-{
- const struct cxsr_latency *latency;
- int i;
-
- if (fsb == 0 || mem == 0)
- return NULL;
-
- for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
- latency = &cxsr_latency_table[i];
- if (is_desktop == latency->is_desktop &&
- is_ddr3 == latency->is_ddr3 &&
- fsb == latency->fsb_freq && mem == latency->mem_freq)
- return latency;
- }
-
- DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
-
- return NULL;
-}
-
-static void pineview_disable_cxsr(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- /* deactivate cxsr */
- I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
-}
-
-/*
- * Latency for FIFO fetches is dependent on several factors:
- * - memory configuration (speed, channels)
- * - chipset
- * - current MCH state
- * It can be fairly high in some situations, so here we assume a fairly
- * pessimal value. It's a tradeoff between extra memory fetches (if we
- * set this value too high, the FIFO will fetch frequently to stay full)
- * and power consumption (set it too low to save power and we might see
- * FIFO underruns and display "flicker").
- *
- * A value of 5us seems to be a good balance; safe for very low end
- * platforms but not overly aggressive on lower latency configs.
- */
-static const int latency_ns = 5000;
-
-static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dsparb = I915_READ(DSPARB);
- int size;
-
- size = dsparb & 0x7f;
- if (plane)
- size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
-
- DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A", size);
-
- return size;
-}
-
-static int i85x_get_fifo_size(struct drm_device *dev, int plane)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dsparb = I915_READ(DSPARB);
- int size;
-
- size = dsparb & 0x1ff;
- if (plane)
- size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
- size >>= 1; /* Convert to cachelines */
-
- DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A", size);
-
- return size;
-}
-
-static int i845_get_fifo_size(struct drm_device *dev, int plane)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dsparb = I915_READ(DSPARB);
- int size;
-
- size = dsparb & 0x7f;
- size >>= 2; /* Convert to cachelines */
-
- DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A",
- size);
-
- return size;
-}
-
-static int i830_get_fifo_size(struct drm_device *dev, int plane)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dsparb = I915_READ(DSPARB);
- int size;
-
- size = dsparb & 0x7f;
- size >>= 1; /* Convert to cachelines */
-
- DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A", size);
-
- return size;
-}
-
-static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
-{
- struct drm_crtc *crtc, *enabled = NULL;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (crtc->enabled && crtc->fb) {
- if (enabled)
- return NULL;
- enabled = crtc;
- }
- }
-
- return enabled;
-}
-
-static void pineview_update_wm(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc;
- const struct cxsr_latency *latency;
- u32 reg;
- unsigned long wm;
-
- latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
- dev_priv->fsb_freq, dev_priv->mem_freq);
- if (!latency) {
- DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
- pineview_disable_cxsr(dev);
- return;
- }
-
- crtc = single_enabled_crtc(dev);
- if (crtc) {
- int clock = crtc->mode.clock;
- int pixel_size = crtc->fb->bits_per_pixel / 8;
-
- /* Display SR */
- wm = intel_calculate_wm(clock, &pineview_display_wm,
- pineview_display_wm.fifo_size,
- pixel_size, latency->display_sr);
- reg = I915_READ(DSPFW1);
- reg &= ~DSPFW_SR_MASK;
- reg |= wm << DSPFW_SR_SHIFT;
- I915_WRITE(DSPFW1, reg);
- DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
-
- /* cursor SR */
- wm = intel_calculate_wm(clock, &pineview_cursor_wm,
- pineview_display_wm.fifo_size,
- pixel_size, latency->cursor_sr);
- reg = I915_READ(DSPFW3);
- reg &= ~DSPFW_CURSOR_SR_MASK;
- reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
- I915_WRITE(DSPFW3, reg);
-
- /* Display HPLL off SR */
- wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
- pineview_display_hplloff_wm.fifo_size,
- pixel_size, latency->display_hpll_disable);
- reg = I915_READ(DSPFW3);
- reg &= ~DSPFW_HPLL_SR_MASK;
- reg |= wm & DSPFW_HPLL_SR_MASK;
- I915_WRITE(DSPFW3, reg);
-
- /* cursor HPLL off SR */
- wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
- pineview_display_hplloff_wm.fifo_size,
- pixel_size, latency->cursor_hpll_disable);
- reg = I915_READ(DSPFW3);
- reg &= ~DSPFW_HPLL_CURSOR_MASK;
- reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
- I915_WRITE(DSPFW3, reg);
- DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
-
- /* activate cxsr */
- I915_WRITE(DSPFW3,
- I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
- DRM_DEBUG_KMS("Self-refresh is enabled\n");
- } else {
- pineview_disable_cxsr(dev);
- DRM_DEBUG_KMS("Self-refresh is disabled\n");
- }
-}
-
-static bool g4x_compute_wm0(struct drm_device *dev,
- int plane,
- const struct intel_watermark_params *display,
- int display_latency_ns,
- const struct intel_watermark_params *cursor,
- int cursor_latency_ns,
- int *plane_wm,
- int *cursor_wm)
-{
- struct drm_crtc *crtc;
- int htotal, hdisplay, clock, pixel_size;
- int line_time_us, line_count;
- int entries, tlb_miss;
-
- crtc = intel_get_crtc_for_plane(dev, plane);
- if (crtc->fb == NULL || !crtc->enabled) {
- *cursor_wm = cursor->guard_size;
- *plane_wm = display->guard_size;
- return false;
- }
-
- htotal = crtc->mode.htotal;
- hdisplay = crtc->mode.hdisplay;
- clock = crtc->mode.clock;
- pixel_size = crtc->fb->bits_per_pixel / 8;
-
- /* Use the small buffer method to calculate plane watermark */
- entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
- tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
- if (tlb_miss > 0)
- entries += tlb_miss;
- entries = DIV_ROUND_UP(entries, display->cacheline_size);
- *plane_wm = entries + display->guard_size;
- if (*plane_wm > (int)display->max_wm)
- *plane_wm = display->max_wm;
-
- /* Use the large buffer method to calculate cursor watermark */
- line_time_us = ((htotal * 1000) / clock);
- line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
- entries = line_count * 64 * pixel_size;
- tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
- if (tlb_miss > 0)
- entries += tlb_miss;
- entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
- *cursor_wm = entries + cursor->guard_size;
- if (*cursor_wm > (int)cursor->max_wm)
- *cursor_wm = (int)cursor->max_wm;
-
- return true;
-}
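
As a sanity check on the arithmetic above, the small-buffer method can be exercised standalone. Every input below is an illustrative assumption (clock, latency, cacheline and guard sizes), not a value from any platform table:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        int clock = 108000;   /* dot clock in kHz (assumed) */
        int pixel_size = 4;   /* bytes per pixel */
        int latency_ns = 200; /* hypothetical LP0 latency */
        int cacheline = 64;   /* FIFO cacheline size in bytes */
        int guard = 2;        /* guard entries added on top */

        /* bytes fetched during the latency window, rounded up to
         * whole cachelines, mirroring g4x_compute_wm0() above */
        int entries = ((clock * pixel_size / 1000) * latency_ns) / 1000;
        int wm = DIV_ROUND_UP(entries, cacheline) + guard;

        printf("entries=%d bytes -> wm=%d\n", entries, wm);
        return 0;
}
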
-
-/*
- * Check the wm result.
- *
- * If any calculated watermark value is larger than the maximum value that
- * can be programmed into the associated watermark register, that watermark
- * must be disabled.
- */
-static bool g4x_check_srwm(struct drm_device *dev,
- int display_wm, int cursor_wm,
- const struct intel_watermark_params *display,
- const struct intel_watermark_params *cursor)
-{
- DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
- display_wm, cursor_wm);
-
- if (display_wm > display->max_wm) {
- DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
- display_wm, display->max_wm);
- return false;
- }
-
- if (cursor_wm > cursor->max_wm) {
- DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
- cursor_wm, cursor->max_wm);
- return false;
- }
-
- if (!(display_wm || cursor_wm)) {
- DRM_DEBUG_KMS("SR latency is 0, disabling\n");
- return false;
- }
-
- return true;
-}
-
-static bool g4x_compute_srwm(struct drm_device *dev,
- int plane,
- int latency_ns,
- const struct intel_watermark_params *display,
- const struct intel_watermark_params *cursor,
- int *display_wm, int *cursor_wm)
-{
- struct drm_crtc *crtc;
- int hdisplay, htotal, pixel_size, clock;
- unsigned long line_time_us;
- int line_count, line_size;
- int small, large;
- int entries;
-
- if (!latency_ns) {
- *display_wm = *cursor_wm = 0;
- return false;
- }
-
- crtc = intel_get_crtc_for_plane(dev, plane);
- hdisplay = crtc->mode.hdisplay;
- htotal = crtc->mode.htotal;
- clock = crtc->mode.clock;
- pixel_size = crtc->fb->bits_per_pixel / 8;
-
- line_time_us = (htotal * 1000) / clock;
- line_count = (latency_ns / line_time_us + 1000) / 1000;
- line_size = hdisplay * pixel_size;
-
- /* Use the minimum of the small and large buffer method for primary */
- small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
- large = line_count * line_size;
-
- entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
- *display_wm = entries + display->guard_size;
-
- /* calculate the self-refresh watermark for display cursor */
- entries = line_count * pixel_size * 64;
- entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
- *cursor_wm = entries + cursor->guard_size;
-
- return g4x_check_srwm(dev,
- *display_wm, *cursor_wm,
- display, cursor);
-}
-
-#define single_plane_enabled(mask) is_power_of_2(mask)
-
-static void g4x_update_wm(struct drm_device *dev)
-{
- static const int sr_latency_ns = 12000;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
- int plane_sr, cursor_sr;
- unsigned int enabled = 0;
-
- if (g4x_compute_wm0(dev, 0,
- &g4x_wm_info, latency_ns,
- &g4x_cursor_wm_info, latency_ns,
- &planea_wm, &cursora_wm))
- enabled |= 1;
-
- if (g4x_compute_wm0(dev, 1,
- &g4x_wm_info, latency_ns,
- &g4x_cursor_wm_info, latency_ns,
- &planeb_wm, &cursorb_wm))
- enabled |= 2;
-
- plane_sr = cursor_sr = 0;
- if (single_plane_enabled(enabled) &&
- g4x_compute_srwm(dev, ffs(enabled) - 1,
- sr_latency_ns,
- &g4x_wm_info,
- &g4x_cursor_wm_info,
- &plane_sr, &cursor_sr))
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
- else
- I915_WRITE(FW_BLC_SELF,
- I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
-
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
- planea_wm, cursora_wm,
- planeb_wm, cursorb_wm,
- plane_sr, cursor_sr);
-
- I915_WRITE(DSPFW1,
- (plane_sr << DSPFW_SR_SHIFT) |
- (cursorb_wm << DSPFW_CURSORB_SHIFT) |
- (planeb_wm << DSPFW_PLANEB_SHIFT) |
- planea_wm);
- I915_WRITE(DSPFW2,
- (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
- (cursora_wm << DSPFW_CURSORA_SHIFT));
- /* HPLL off in SR has some issues on G4x... disable it */
- I915_WRITE(DSPFW3,
- (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
- (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
-}
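
The self-refresh path above hinges on single_plane_enabled(), i.e. on the enabled-pipe mask having exactly one bit set. A minimal userspace demo of that check, with is_power_of_2() reimplemented locally since the kernel helper is not available here:

#include <stdio.h>
#include <stdbool.h>

/* local stand-in for the kernel's is_power_of_2() */
static bool is_power_of_2(unsigned long n)
{
        return n != 0 && (n & (n - 1)) == 0;
}

#define single_plane_enabled(mask) is_power_of_2(mask)

int main(void)
{
        /* bit 0 = pipe A, bit 1 = pipe B */
        printf("A only:  %d\n", single_plane_enabled(1)); /* 1 */
        printf("B only:  %d\n", single_plane_enabled(2)); /* 1 */
        printf("A and B: %d\n", single_plane_enabled(3)); /* 0 */
        printf("none:    %d\n", single_plane_enabled(0)); /* 0 */
        return 0;
}
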
-
-static void i965_update_wm(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc;
- int srwm = 1;
- int cursor_sr = 16;
-
- /* Calculate SR entries for single-plane configs */
- crtc = single_enabled_crtc(dev);
- if (crtc) {
- /* self-refresh has much higher latency */
- static const int sr_latency_ns = 12000;
- int clock = crtc->mode.clock;
- int htotal = crtc->mode.htotal;
- int hdisplay = crtc->mode.hdisplay;
- int pixel_size = crtc->fb->bits_per_pixel / 8;
- unsigned long line_time_us;
- int entries;
-
- line_time_us = ((htotal * 1000) / clock);
-
- /* Use ns/us then divide to preserve precision */
- entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * hdisplay;
- entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
- srwm = I965_FIFO_SIZE - entries;
- if (srwm < 0)
- srwm = 1;
- srwm &= 0x1ff;
- DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
- entries, srwm);
-
- entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * 64;
- entries = DIV_ROUND_UP(entries,
- i965_cursor_wm_info.cacheline_size);
- cursor_sr = i965_cursor_wm_info.fifo_size -
- (entries + i965_cursor_wm_info.guard_size);
-
- if (cursor_sr > i965_cursor_wm_info.max_wm)
- cursor_sr = i965_cursor_wm_info.max_wm;
-
- DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
- "cursor %d\n", srwm, cursor_sr);
-
- if (IS_CRESTLINE(dev))
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
- } else {
- /* Turn off self refresh if both pipes are enabled */
- if (IS_CRESTLINE(dev))
- I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
- & ~FW_BLC_SELF_EN);
- }
-
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
- srwm);
-
- /* 965 has limitations... */
- I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
- (8 << 16) | (8 << 8) | (8 << 0));
- I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
- /* update cursor SR watermark */
- I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
-}
-
-static void i9xx_update_wm(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- const struct intel_watermark_params *wm_info;
- uint32_t fwater_lo;
- uint32_t fwater_hi;
- int cwm, srwm = 1;
- int fifo_size;
- int planea_wm, planeb_wm;
- struct drm_crtc *crtc, *enabled = NULL;
-
- if (IS_I945GM(dev))
- wm_info = &i945_wm_info;
- else if (!IS_GEN2(dev))
- wm_info = &i915_wm_info;
- else
- wm_info = &i855_wm_info;
-
- fifo_size = dev_priv->display.get_fifo_size(dev, 0);
- crtc = intel_get_crtc_for_plane(dev, 0);
- if (crtc->enabled && crtc->fb) {
- planea_wm = intel_calculate_wm(crtc->mode.clock,
- wm_info, fifo_size,
- crtc->fb->bits_per_pixel / 8,
- latency_ns);
- enabled = crtc;
- } else
- planea_wm = fifo_size - wm_info->guard_size;
-
- fifo_size = dev_priv->display.get_fifo_size(dev, 1);
- crtc = intel_get_crtc_for_plane(dev, 1);
- if (crtc->enabled && crtc->fb) {
- planeb_wm = intel_calculate_wm(crtc->mode.clock,
- wm_info, fifo_size,
- crtc->fb->bits_per_pixel / 8,
- latency_ns);
- if (enabled == NULL)
- enabled = crtc;
- else
- enabled = NULL;
- } else
- planeb_wm = fifo_size - wm_info->guard_size;
-
- DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
-
- /*
- * Overlay gets an aggressive default since video jitter is bad.
- */
- cwm = 2;
-
- /* Play safe and disable self-refresh before adjusting watermarks. */
- if (IS_I945G(dev) || IS_I945GM(dev))
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
- else if (IS_I915GM(dev))
- I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
-
- /* Calculate SR entries for single-plane configs */
- if (HAS_FW_BLC(dev) && enabled) {
- /* self-refresh has much higher latency */
- static const int sr_latency_ns = 6000;
- int clock = enabled->mode.clock;
- int htotal = enabled->mode.htotal;
- int hdisplay = enabled->mode.hdisplay;
- int pixel_size = enabled->fb->bits_per_pixel / 8;
- unsigned long line_time_us;
- int entries;
-
- line_time_us = (htotal * 1000) / clock;
-
- /* Use ns/us then divide to preserve precision */
- entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * hdisplay;
- entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
- DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
- srwm = wm_info->fifo_size - entries;
- if (srwm < 0)
- srwm = 1;
-
- if (IS_I945G(dev) || IS_I945GM(dev))
- I915_WRITE(FW_BLC_SELF,
- FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
- else if (IS_I915GM(dev))
- I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
- }
-
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
- planea_wm, planeb_wm, cwm, srwm);
-
- fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
- fwater_hi = (cwm & 0x1f);
-
- /* Set request length to 8 cachelines per fetch */
- fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
- fwater_hi = fwater_hi | (1 << 8);
-
- I915_WRITE(FW_BLC, fwater_lo);
- I915_WRITE(FW_BLC2, fwater_hi);
-
- if (HAS_FW_BLC(dev)) {
- if (enabled) {
- if (IS_I945G(dev) || IS_I945GM(dev))
- I915_WRITE(FW_BLC_SELF,
- FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
- else if (IS_I915GM(dev))
- I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
- DRM_DEBUG_KMS("memory self refresh enabled\n");
- } else
- DRM_DEBUG_KMS("memory self refresh disabled\n");
- }
-}
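
The FW_BLC packing near the end of i9xx_update_wm() is easy to get wrong; this sketch reproduces just the bit layout, with made-up watermark values:

#include <stdio.h>

int main(void)
{
        /* hypothetical plane and cursor watermarks */
        int planea_wm = 21, planeb_wm = 9, cwm = 2;

        /* pack as i9xx_update_wm() does: plane B in bits 21:16,
         * plane A in bits 5:0, cursor in the second register */
        unsigned int fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
        unsigned int fwater_hi = cwm & 0x1f;

        /* "8 cachelines per fetch" request-length bits */
        fwater_lo |= (1 << 24) | (1 << 8);
        fwater_hi |= (1 << 8);

        printf("FW_BLC=0x%08x FW_BLC2=0x%08x\n", fwater_lo, fwater_hi);
        return 0;
}
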
-
-static void i830_update_wm(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc;
- uint32_t fwater_lo;
- int planea_wm;
-
- crtc = single_enabled_crtc(dev);
- if (crtc == NULL)
- return;
-
- planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
- dev_priv->display.get_fifo_size(dev, 0),
- crtc->fb->bits_per_pixel / 8,
- latency_ns);
- fwater_lo = I915_READ(FW_BLC) & ~0xfff;
- fwater_lo |= (3<<8) | planea_wm;
-
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
-
- I915_WRITE(FW_BLC, fwater_lo);
-}
-
-#define ILK_LP0_PLANE_LATENCY 700
-#define ILK_LP0_CURSOR_LATENCY 1300
-
-/*
- * Check the wm result.
- *
- * If any calculated watermark value is larger than the maximum value that
- * can be programmed into the associated watermark register, that watermark
- * must be disabled.
- */
-static bool ironlake_check_srwm(struct drm_device *dev, int level,
- int fbc_wm, int display_wm, int cursor_wm,
- const struct intel_watermark_params *display,
- const struct intel_watermark_params *cursor)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
- " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
-
- if (fbc_wm > SNB_FBC_MAX_SRWM) {
- DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
- fbc_wm, SNB_FBC_MAX_SRWM, level);
-
- /* fbc has its own way to disable FBC WM */
- I915_WRITE(DISP_ARB_CTL,
- I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
- return false;
- }
-
- if (display_wm > display->max_wm) {
- DRM_DEBUG_KMS("display watermark (%d) is too large (%ld), disabling wm%d+\n",
- display_wm, display->max_wm, level);
- return false;
- }
-
- if (cursor_wm > cursor->max_wm) {
- DRM_DEBUG_KMS("cursor watermark (%d) is too large (%ld), disabling wm%d+\n",
- cursor_wm, cursor->max_wm, level);
- return false;
- }
-
- if (!(fbc_wm || display_wm || cursor_wm)) {
- DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
- return false;
- }
-
- return true;
-}
-
-/*
- * Compute watermark values of WM[1-3],
- */
-static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
- int latency_ns,
- const struct intel_watermark_params *display,
- const struct intel_watermark_params *cursor,
- int *fbc_wm, int *display_wm, int *cursor_wm)
-{
- struct drm_crtc *crtc;
- unsigned long line_time_us;
- int hdisplay, htotal, pixel_size, clock;
- int line_count, line_size;
- int small, large;
- int entries;
-
- if (!latency_ns) {
- *fbc_wm = *display_wm = *cursor_wm = 0;
- return false;
- }
-
- crtc = intel_get_crtc_for_plane(dev, plane);
- hdisplay = crtc->mode.hdisplay;
- htotal = crtc->mode.htotal;
- clock = crtc->mode.clock;
- pixel_size = crtc->fb->bits_per_pixel / 8;
-
- line_time_us = (htotal * 1000) / clock;
- line_count = (latency_ns / line_time_us + 1000) / 1000;
- line_size = hdisplay * pixel_size;
-
- /* Use the minimum of the small and large buffer method for primary */
- small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
- large = line_count * line_size;
-
- entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
- *display_wm = entries + display->guard_size;
-
- /*
- * Spec says:
- * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
- */
- *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
-
- /* calculate the self-refresh watermark for display cursor */
- entries = line_count * pixel_size * 64;
- entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
- *cursor_wm = entries + cursor->guard_size;
-
- return ironlake_check_srwm(dev, level,
- *fbc_wm, *display_wm, *cursor_wm,
- display, cursor);
-}
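
The FBC watermark line above implements the quoted spec formula verbatim; a worked example with an assumed primary watermark and a 1920-pixel, 32bpp scanline:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        int display_wm = 46;            /* hypothetical final primary WM */
        int hdisplay = 1920, bpp = 4;   /* assumed 32bpp framebuffer */
        int line_size = hdisplay * bpp; /* bytes per line = 7680 */

        /* FBC WM = ((Final Primary WM * 64) / bytes per line) + 2 */
        int fbc_wm = DIV_ROUND_UP(display_wm * 64, line_size) + 2;

        printf("fbc_wm = %d lines\n", fbc_wm);
        return 0;
}
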
-
-static void ironlake_update_wm(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int fbc_wm, plane_wm, cursor_wm;
- unsigned int enabled;
-
- enabled = 0;
- if (g4x_compute_wm0(dev, 0,
- &ironlake_display_wm_info,
- ILK_LP0_PLANE_LATENCY,
- &ironlake_cursor_wm_info,
- ILK_LP0_CURSOR_LATENCY,
- &plane_wm, &cursor_wm)) {
- I915_WRITE(WM0_PIPEA_ILK,
- (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
- DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
- " plane %d, " "cursor: %d\n",
- plane_wm, cursor_wm);
- enabled |= 1;
- }
-
- if (g4x_compute_wm0(dev, 1,
- &ironlake_display_wm_info,
- ILK_LP0_PLANE_LATENCY,
- &ironlake_cursor_wm_info,
- ILK_LP0_CURSOR_LATENCY,
- &plane_wm, &cursor_wm)) {
- I915_WRITE(WM0_PIPEB_ILK,
- (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
- DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
- " plane %d, cursor: %d\n",
- plane_wm, cursor_wm);
- enabled |= 2;
- }
-
- /*
- * Calculate and update the self-refresh watermark only when one
- * display plane is used.
- */
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
-
- if (!single_plane_enabled(enabled))
- return;
- enabled = ffs(enabled) - 1;
-
- /* WM1 */
- if (!ironlake_compute_srwm(dev, 1, enabled,
- ILK_READ_WM1_LATENCY() * 500,
- &ironlake_display_srwm_info,
- &ironlake_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
- return;
-
- I915_WRITE(WM1_LP_ILK,
- WM1_LP_SR_EN |
- (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
- (fbc_wm << WM1_LP_FBC_SHIFT) |
- (plane_wm << WM1_LP_SR_SHIFT) |
- cursor_wm);
-
- /* WM2 */
- if (!ironlake_compute_srwm(dev, 2, enabled,
- ILK_READ_WM2_LATENCY() * 500,
- &ironlake_display_srwm_info,
- &ironlake_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
- return;
-
- I915_WRITE(WM2_LP_ILK,
- WM2_LP_EN |
- (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
- (fbc_wm << WM1_LP_FBC_SHIFT) |
- (plane_wm << WM1_LP_SR_SHIFT) |
- cursor_wm);
-
- /*
- * WM3 is unsupported on ILK, probably because we don't have latency
- * data for that power state
- */
-}
-
-void sandybridge_update_wm(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int latency = SNB_READ_WM0_LATENCY() * 100; /* convert 0.1us units to ns */
- u32 val;
- int fbc_wm, plane_wm, cursor_wm;
- unsigned int enabled;
-
- enabled = 0;
- if (g4x_compute_wm0(dev, 0,
- &sandybridge_display_wm_info, latency,
- &sandybridge_cursor_wm_info, latency,
- &plane_wm, &cursor_wm)) {
- val = I915_READ(WM0_PIPEA_ILK);
- val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
- I915_WRITE(WM0_PIPEA_ILK, val |
- ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
- DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
- " plane %d, " "cursor: %d\n",
- plane_wm, cursor_wm);
- enabled |= 1;
- }
-
- if (g4x_compute_wm0(dev, 1,
- &sandybridge_display_wm_info, latency,
- &sandybridge_cursor_wm_info, latency,
- &plane_wm, &cursor_wm)) {
- val = I915_READ(WM0_PIPEB_ILK);
- val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
- I915_WRITE(WM0_PIPEB_ILK, val |
- ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
- DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
- " plane %d, cursor: %d\n",
- plane_wm, cursor_wm);
- enabled |= 2;
- }
-
- /* IVB has 3 pipes */
- if (IS_IVYBRIDGE(dev) &&
- g4x_compute_wm0(dev, 2,
- &sandybridge_display_wm_info, latency,
- &sandybridge_cursor_wm_info, latency,
- &plane_wm, &cursor_wm)) {
- val = I915_READ(WM0_PIPEC_IVB);
- val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
- I915_WRITE(WM0_PIPEC_IVB, val |
- ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
- DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
- " plane %d, cursor: %d\n",
- plane_wm, cursor_wm);
- enabled |= 1 << 2;
- }
-
- /*
- * Calculate and update the self-refresh watermark only when one
- * display plane is used.
- *
- * SNB supports 3 levels of watermarks.
- *
- * WM1/WM2/WM3 watermarks have to be enabled in ascending order
- * and disabled in descending order.
- *
- */
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
-
- if (!single_plane_enabled(enabled) ||
- dev_priv->sprite_scaling_enabled)
- return;
- enabled = ffs(enabled) - 1;
-
- /* WM1 */
- if (!ironlake_compute_srwm(dev, 1, enabled,
- SNB_READ_WM1_LATENCY() * 500,
- &sandybridge_display_srwm_info,
- &sandybridge_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
- return;
-
- I915_WRITE(WM1_LP_ILK,
- WM1_LP_SR_EN |
- (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
- (fbc_wm << WM1_LP_FBC_SHIFT) |
- (plane_wm << WM1_LP_SR_SHIFT) |
- cursor_wm);
-
- /* WM2 */
- if (!ironlake_compute_srwm(dev, 2, enabled,
- SNB_READ_WM2_LATENCY() * 500,
- &sandybridge_display_srwm_info,
- &sandybridge_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
- return;
-
- I915_WRITE(WM2_LP_ILK,
- WM2_LP_EN |
- (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
- (fbc_wm << WM1_LP_FBC_SHIFT) |
- (plane_wm << WM1_LP_SR_SHIFT) |
- cursor_wm);
-
- /* WM3 */
- if (!ironlake_compute_srwm(dev, 3, enabled,
- SNB_READ_WM3_LATENCY() * 500,
- &sandybridge_display_srwm_info,
- &sandybridge_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
- return;
-
- I915_WRITE(WM3_LP_ILK,
- WM3_LP_EN |
- (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
- (fbc_wm << WM1_LP_FBC_SHIFT) |
- (plane_wm << WM1_LP_SR_SHIFT) |
- cursor_wm);
-}
-
-static bool
-sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
- uint32_t sprite_width, int pixel_size,
- const struct intel_watermark_params *display,
- int display_latency_ns, int *sprite_wm)
-{
- struct drm_crtc *crtc;
- int clock;
- int entries, tlb_miss;
-
- crtc = intel_get_crtc_for_plane(dev, plane);
- if (crtc->fb == NULL || !crtc->enabled) {
- *sprite_wm = display->guard_size;
- return false;
- }
-
- clock = crtc->mode.clock;
-
- /* Use the small buffer method to calculate the sprite watermark */
- entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
- tlb_miss = display->fifo_size*display->cacheline_size -
- sprite_width * 8;
- if (tlb_miss > 0)
- entries += tlb_miss;
- entries = DIV_ROUND_UP(entries, display->cacheline_size);
- *sprite_wm = entries + display->guard_size;
- if (*sprite_wm > (int)display->max_wm)
- *sprite_wm = display->max_wm;
-
- return true;
-}
-
-static bool
-sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
- uint32_t sprite_width, int pixel_size,
- const struct intel_watermark_params *display,
- int latency_ns, int *sprite_wm)
-{
- struct drm_crtc *crtc;
- unsigned long line_time_us;
- int clock;
- int line_count, line_size;
- int small, large;
- int entries;
-
- if (!latency_ns) {
- *sprite_wm = 0;
- return false;
- }
-
- crtc = intel_get_crtc_for_plane(dev, plane);
- clock = crtc->mode.clock;
- if (!clock) {
- *sprite_wm = 0;
- return false;
- }
-
- line_time_us = (sprite_width * 1000) / clock;
- if (!line_time_us) {
- *sprite_wm = 0;
- return false;
- }
-
- line_count = (latency_ns / line_time_us + 1000) / 1000;
- line_size = sprite_width * pixel_size;
-
- /* Use the minimum of the small and large buffer method for primary */
- small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
- large = line_count * line_size;
-
- entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
- *sprite_wm = entries + display->guard_size;
-
- return *sprite_wm <= 0x3ff;
-}
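
The `(latency_ns / line_time_us + 1000) / 1000` idiom above keeps the intermediate result in thousandths of a line (ns divided by us) and folds in the spec's "+1 line" before scaling back down. A standalone rendition with assumed numbers:

#include <stdio.h>

int main(void)
{
        /* all numbers are illustrative assumptions */
        int clock = 148500;                      /* kHz */
        int sprite_width = 1920;
        int latency_ns = 6000;                   /* WM1-style latency */

        /* note: the sprite path bases line time on sprite_width,
         * as sandybridge_compute_sprite_srwm() does above */
        long line_time_us = (sprite_width * 1000L) / clock;
        long line_count = (latency_ns / line_time_us + 1000) / 1000;

        printf("line_time=%ld us, line_count=%ld\n",
               line_time_us, line_count);
        return 0;
}
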
-
-static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
- uint32_t sprite_width, int pixel_size)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int latency = SNB_READ_WM0_LATENCY() * 100; /* convert 0.1us units to ns */
- u32 val;
- int sprite_wm, reg;
- int ret;
-
- switch (pipe) {
- case 0:
- reg = WM0_PIPEA_ILK;
- break;
- case 1:
- reg = WM0_PIPEB_ILK;
- break;
- case 2:
- reg = WM0_PIPEC_IVB;
- break;
- default:
- return; /* bad pipe */
- }
-
- ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
- &sandybridge_display_wm_info,
- latency, &sprite_wm);
- if (!ret) {
- DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
- pipe);
- return;
- }
-
- val = I915_READ(reg);
- val &= ~WM0_PIPE_SPRITE_MASK;
- I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
- DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
-
-
- ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
- pixel_size,
- &sandybridge_display_srwm_info,
- SNB_READ_WM1_LATENCY() * 500,
- &sprite_wm);
- if (!ret) {
- DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
- pipe);
- return;
- }
- I915_WRITE(WM1S_LP_ILK, sprite_wm);
-
- /* Only IVB has two more LP watermarks for sprite */
- if (!IS_IVYBRIDGE(dev))
- return;
-
- ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
- pixel_size,
- &sandybridge_display_srwm_info,
- SNB_READ_WM2_LATENCY() * 500,
- &sprite_wm);
- if (!ret) {
- DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
- pipe);
- return;
- }
- I915_WRITE(WM2S_LP_IVB, sprite_wm);
-
- ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
- pixel_size,
- &sandybridge_display_srwm_info,
- SNB_READ_WM3_LATENCY() * 500,
- &sprite_wm);
- if (!ret) {
- DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
- pipe);
- return;
- }
- I915_WRITE(WM3S_LP_IVB, sprite_wm);
-}
-
-/**
- * intel_update_watermarks - update FIFO watermark values based on current modes
- *
- * Calculate watermark values for the various WM regs based on current mode
- * and plane configuration.
- *
- * There are several cases to deal with here:
- * - normal (i.e. non-self-refresh)
- * - self-refresh (SR) mode
- * - lines are large relative to FIFO size (buffer can hold up to 2)
- * - lines are small relative to FIFO size (buffer can hold more than 2
- * lines), so we need to account for TLB latency
- *
- * The normal calculation is:
- * watermark = dotclock * bytes per pixel * latency
- * where latency is platform & configuration dependent (we assume pessimal
- * values here).
- *
- * The SR calculation is:
- * watermark = (trunc(latency/line time)+1) * surface width *
- * bytes per pixel
- * where
- * line time = htotal / dotclock
- * surface width = hdisplay for normal plane and 64 for cursor
- * and latency is assumed to be high, as above.
- *
- * The final value programmed to the register should always be rounded up,
- * and include an extra 2 entries to account for clock crossings.
- *
- * We don't use the sprite, so we can ignore that. And on Crestline we have
- * to set the non-SR watermarks to 8.
- */
-static void intel_update_watermarks(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->display.update_wm)
- dev_priv->display.update_wm(dev);
-}
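
The two formulas in the comment above can be evaluated numerically; all inputs below are assumed (a 1080p-like mode), not hardware data:

#include <stdio.h>

int main(void)
{
        int dotclock = 148500;  /* kHz */
        int bpp = 4;            /* bytes per pixel */
        int latency_ns = 12000; /* pessimal SR latency, as above */
        int htotal = 2200, hdisplay = 1920;

        /* normal: watermark = dotclock * bytes per pixel * latency */
        long normal_bytes = ((long)dotclock * bpp / 1000) * latency_ns / 1000;

        /* SR: (trunc(latency / line time) + 1) * width * bytes per pixel,
         * with line time = htotal / dotclock */
        long line_time_us = (htotal * 1000L) / dotclock;
        long lines = latency_ns / 1000 / line_time_us + 1;
        long sr_bytes = lines * hdisplay * bpp;

        printf("normal=%ld bytes, SR=%ld bytes (%ld lines)\n",
               normal_bytes, sr_bytes, lines);
        return 0;
}
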
-
-void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
- uint32_t sprite_width, int pixel_size)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->display.update_sprite_wm)
- dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
- pixel_size);
-}
-
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
if (i915_panel_use_ssc >= 0)
@@ -5143,6 +3732,222 @@ static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
}
}
+static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 temp;
+
+ temp = I915_READ(LVDS);
+ temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+ if (pipe == 1) {
+ temp |= LVDS_PIPEB_SELECT;
+ } else {
+ temp &= ~LVDS_PIPEB_SELECT;
+ }
+ /* set the corresponding LVDS_BORDER bit */
+ temp |= dev_priv->lvds_border_bits;
+ /* Set the B0-B3 data pairs corresponding to whether we're going to
+ * set the DPLLs for dual-channel mode or not.
+ */
+ if (clock->p2 == 7)
+ temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+ else
+ temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+
+ /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+ * appropriately here, but we need to look more thoroughly into how
+ * panels behave in the two modes.
+ */
+ /* set the dithering flag on LVDS as needed */
+ if (INTEL_INFO(dev)->gen >= 4) {
+ if (dev_priv->lvds_dither)
+ temp |= LVDS_ENABLE_DITHER;
+ else
+ temp &= ~LVDS_ENABLE_DITHER;
+ }
+ temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ temp |= LVDS_HSYNC_POLARITY;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ temp |= LVDS_VSYNC_POLARITY;
+ I915_WRITE(LVDS, temp);
+}
+
+static void i9xx_update_pll(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ intel_clock_t *clock, intel_clock_t *reduced_clock,
+ int num_connectors)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 dpll;
+ bool is_sdvo;
+
+ is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
+
+ dpll = DPLL_VGA_MODE_DIS;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ dpll |= DPLLB_MODE_LVDS;
+ else
+ dpll |= DPLLB_MODE_DAC_SERIAL;
+ if (is_sdvo) {
+ int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
+ if (pixel_multiplier > 1) {
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
+ }
+ dpll |= DPLL_DVO_HIGH_SPEED;
+ }
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
+ dpll |= DPLL_DVO_HIGH_SPEED;
+
+ /* compute bitmask from p1 value */
+ if (IS_PINEVIEW(dev))
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
+ else {
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ if (IS_G4X(dev) && reduced_clock)
+ dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+ }
+ switch (clock->p2) {
+ case 5:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+ break;
+ case 7:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+ break;
+ case 10:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+ break;
+ case 14:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+ break;
+ }
+ if (INTEL_INFO(dev)->gen >= 4)
+ dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
+
+ if (is_sdvo && intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
+ dpll |= PLL_REF_INPUT_TVCLKINBC;
+ else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
+ /* XXX: just matching BIOS for now */
+ /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
+ dpll |= 3;
+ else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ else
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ dpll |= DPLL_VCO_ENABLE;
+ I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
+ POSTING_READ(DPLL(pipe));
+ udelay(150);
+
+ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
+ * This is an exception to the general rule that mode_set doesn't turn
+ * things on.
+ */
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ intel_update_lvds(crtc, clock, adjusted_mode);
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
+ intel_dp_set_m_n(crtc, mode, adjusted_mode);
+
+ I915_WRITE(DPLL(pipe), dpll);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(DPLL(pipe));
+ udelay(150);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ u32 temp = 0;
+ if (is_sdvo) {
+ temp = intel_mode_get_pixel_multiplier(adjusted_mode);
+ if (temp > 1)
+ temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ else
+ temp = 0;
+ }
+ I915_WRITE(DPLL_MD(pipe), temp);
+ } else {
+ /* The pixel multiplier can only be updated once the
+ * DPLL is enabled and the clocks are stable.
+ *
+ * So write it again.
+ */
+ I915_WRITE(DPLL(pipe), dpll);
+ }
+}
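
The P1 post divider is programmed one-hot, as the `(1 << (clock->p1 - 1))` expressions above show. A small demo of the resulting DPLL bits; the shift value here is assumed for illustration only (the real constant is DPLL_FPA01_P1_POST_DIV_SHIFT in i915_reg.h):

#include <stdio.h>

#define P1_SHIFT 16 /* assumed stand-in for the real shift */

int main(void)
{
        /* divider N sets bit N-1 of the one-hot field, which then
         * lands at P1_SHIFT in the DPLL control word */
        for (int p1 = 1; p1 <= 8; p1++) {
                unsigned int bits = (1u << (p1 - 1)) << P1_SHIFT;
                printf("p1=%d -> dpll bits 0x%08x\n", p1, bits);
        }
        return 0;
}
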
+
+static void i8xx_update_pll(struct drm_crtc *crtc,
+ struct drm_display_mode *adjusted_mode,
+ intel_clock_t *clock,
+ int num_connectors)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 dpll;
+
+ dpll = DPLL_VGA_MODE_DIS;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ } else {
+ if (clock->p1 == 2)
+ dpll |= PLL_P1_DIVIDE_BY_TWO;
+ else
+ dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ if (clock->p2 == 4)
+ dpll |= PLL_P2_DIVIDE_BY_4;
+ }
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
+ /* XXX: just matching BIOS for now */
+ /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
+ dpll |= 3;
+ else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ else
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ dpll |= DPLL_VCO_ENABLE;
+ I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
+ POSTING_READ(DPLL(pipe));
+ udelay(150);
+
+ I915_WRITE(DPLL(pipe), dpll);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(DPLL(pipe));
+ udelay(150);
+
+ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
+ * This is an exception to the general rule that mode_set doesn't turn
+ * things on.
+ */
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ intel_update_lvds(crtc, clock, adjusted_mode);
+
+ /* The pixel multiplier can only be updated once the
+ * DPLL is enabled and the clocks are stable.
+ *
+ * So write it again.
+ */
+ I915_WRITE(DPLL(pipe), dpll);
+}
+
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -5156,15 +3961,13 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
int plane = intel_crtc->plane;
int refclk, num_connectors = 0;
intel_clock_t clock, reduced_clock;
- u32 dpll, dspcntr, pipeconf, vsyncshift;
- bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
- bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
+ u32 dspcntr, pipeconf, vsyncshift;
+ bool ok, has_reduced_clock = false, is_sdvo = false;
+ bool is_lvds = false, is_tv = false, is_dp = false;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
const intel_limit_t *limit;
int ret;
- u32 temp;
- u32 lvds_sync = 0;
list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
if (encoder->base.crtc != crtc)
@@ -5180,15 +3983,9 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
if (encoder->needs_tv_clock)
is_tv = true;
break;
- case INTEL_OUTPUT_DVO:
- is_dvo = true;
- break;
case INTEL_OUTPUT_TVOUT:
is_tv = true;
break;
- case INTEL_OUTPUT_ANALOG:
- is_crt = true;
- break;
case INTEL_OUTPUT_DISPLAYPORT:
is_dp = true;
break;
@@ -5235,71 +4032,12 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
&reduced_clock : NULL);
- dpll = DPLL_VGA_MODE_DIS;
-
- if (!IS_GEN2(dev)) {
- if (is_lvds)
- dpll |= DPLLB_MODE_LVDS;
- else
- dpll |= DPLLB_MODE_DAC_SERIAL;
- if (is_sdvo) {
- int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
- if (pixel_multiplier > 1) {
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
- }
- dpll |= DPLL_DVO_HIGH_SPEED;
- }
- if (is_dp)
- dpll |= DPLL_DVO_HIGH_SPEED;
-
- /* compute bitmask from p1 value */
- if (IS_PINEVIEW(dev))
- dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
- else {
- dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
- if (IS_G4X(dev) && has_reduced_clock)
- dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
- }
- switch (clock.p2) {
- case 5:
- dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
- break;
- case 7:
- dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
- break;
- case 10:
- dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
- break;
- case 14:
- dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
- break;
- }
- if (INTEL_INFO(dev)->gen >= 4)
- dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
- } else {
- if (is_lvds) {
- dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
- } else {
- if (clock.p1 == 2)
- dpll |= PLL_P1_DIVIDE_BY_TWO;
- else
- dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
- if (clock.p2 == 4)
- dpll |= PLL_P2_DIVIDE_BY_4;
- }
- }
-
- if (is_sdvo && is_tv)
- dpll |= PLL_REF_INPUT_TVCLKINBC;
- else if (is_tv)
- /* XXX: just matching BIOS for now */
- /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
- dpll |= 3;
- else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
- dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ if (IS_GEN2(dev))
+ i8xx_update_pll(crtc, adjusted_mode, &clock, num_connectors);
else
- dpll |= PLL_REF_INPUT_DREFCLK;
+ i9xx_update_pll(crtc, mode, adjusted_mode, &clock,
+ has_reduced_clock ? &reduced_clock : NULL,
+ num_connectors);
/* setup pipeconf */
pipeconf = I915_READ(PIPECONF(pipe));
@@ -5336,97 +4074,9 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
}
}
- dpll |= DPLL_VCO_ENABLE;
-
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
- I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
-
- POSTING_READ(DPLL(pipe));
- udelay(150);
-
- /* The LVDS pin pair needs to be on before the DPLLs are enabled.
- * This is an exception to the general rule that mode_set doesn't turn
- * things on.
- */
- if (is_lvds) {
- temp = I915_READ(LVDS);
- temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
- if (pipe == 1) {
- temp |= LVDS_PIPEB_SELECT;
- } else {
- temp &= ~LVDS_PIPEB_SELECT;
- }
- /* set the corresponding LVDS_BORDER bit */
- temp |= dev_priv->lvds_border_bits;
- /* Set the B0-B3 data pairs corresponding to whether we're going to
- * set the DPLLs for dual-channel mode or not.
- */
- if (clock.p2 == 7)
- temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
- else
- temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
-
- /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
- * appropriately here, but we need to look more thoroughly into how
- * panels behave in the two modes.
- */
- /* set the dithering flag on LVDS as needed */
- if (INTEL_INFO(dev)->gen >= 4) {
- if (dev_priv->lvds_dither)
- temp |= LVDS_ENABLE_DITHER;
- else
- temp &= ~LVDS_ENABLE_DITHER;
- }
- if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
- lvds_sync |= LVDS_HSYNC_POLARITY;
- if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
- lvds_sync |= LVDS_VSYNC_POLARITY;
- if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
- != lvds_sync) {
- char flags[2] = "-+";
- DRM_INFO("Changing LVDS panel from "
- "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
- flags[!(temp & LVDS_HSYNC_POLARITY)],
- flags[!(temp & LVDS_VSYNC_POLARITY)],
- flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
- flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
- temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
- temp |= lvds_sync;
- }
- I915_WRITE(LVDS, temp);
- }
-
- if (is_dp) {
- intel_dp_set_m_n(crtc, mode, adjusted_mode);
- }
-
- I915_WRITE(DPLL(pipe), dpll);
-
- /* Wait for the clocks to stabilize. */
- POSTING_READ(DPLL(pipe));
- udelay(150);
-
- if (INTEL_INFO(dev)->gen >= 4) {
- temp = 0;
- if (is_sdvo) {
- temp = intel_mode_get_pixel_multiplier(adjusted_mode);
- if (temp > 1)
- temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
- else
- temp = 0;
- }
- I915_WRITE(DPLL_MD(pipe), temp);
- } else {
- /* The pixel multiplier can only be updated once the
- * DPLL is enabled and the clocks are stable.
- *
- * So write it again.
- */
- I915_WRITE(DPLL(pipe), dpll);
- }
-
if (HAS_PIPE_CXSR(dev)) {
if (intel_crtc->lowfreq_avail) {
DRM_DEBUG_KMS("enabling CxSR downclocking\n");
@@ -5492,7 +4142,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(DSPCNTR(plane), dspcntr);
POSTING_READ(DSPCNTR(plane));
- intel_enable_plane(dev_priv, plane, pipe);
ret = intel_pipe_set_base(crtc, x, y, old_fb);
@@ -5668,17 +4317,16 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
bool ok, has_reduced_clock = false, is_sdvo = false;
bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
- struct intel_encoder *has_edp_encoder = NULL;
struct drm_mode_config *mode_config = &dev->mode_config;
- struct intel_encoder *encoder;
+ struct intel_encoder *encoder, *edp_encoder = NULL;
const intel_limit_t *limit;
int ret;
struct fdi_m_n m_n = {0};
u32 temp;
- u32 lvds_sync = 0;
int target_clock, pixel_multiplier, lane, link_bw, factor;
unsigned int pipe_bpp;
bool dither;
+ bool is_cpu_edp = false, is_pch_edp = false;
list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
if (encoder->base.crtc != crtc)
@@ -5704,7 +4352,12 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
is_dp = true;
break;
case INTEL_OUTPUT_EDP:
- has_edp_encoder = encoder;
+ is_dp = true;
+ if (intel_encoder_is_pch_edp(&encoder->base))
+ is_pch_edp = true;
+ else
+ is_cpu_edp = true;
+ edp_encoder = encoder;
break;
}
@@ -5767,15 +4420,13 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
lane = 0;
/* CPU eDP doesn't require FDI link, so just set DP M/N
according to current link config */
- if (has_edp_encoder &&
- !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+ if (is_cpu_edp) {
target_clock = mode->clock;
- intel_edp_link_config(has_edp_encoder,
- &lane, &link_bw);
+ intel_edp_link_config(edp_encoder, &lane, &link_bw);
} else {
/* [e]DP over FDI requires target mode clock
instead of link clock */
- if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
+ if (is_dp)
target_clock = mode->clock;
else
target_clock = adjusted_mode->clock;
@@ -5866,7 +4517,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
}
dpll |= DPLL_DVO_HIGH_SPEED;
}
- if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
+ if (is_dp && !is_cpu_edp)
dpll |= DPLL_DVO_HIGH_SPEED;
/* compute bitmask from p1 value */
@@ -5909,30 +4560,22 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
drm_mode_debug_printmodeline(mode);
- /* PCH eDP needs FDI, but CPU eDP does not */
- if (!intel_crtc->no_pll) {
- if (!has_edp_encoder ||
- intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
- I915_WRITE(PCH_FP0(pipe), fp);
- I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
-
- POSTING_READ(PCH_DPLL(pipe));
- udelay(150);
- }
- } else {
- if (dpll == (I915_READ(PCH_DPLL(0)) & 0x7fffffff) &&
- fp == I915_READ(PCH_FP0(0))) {
- intel_crtc->use_pll_a = true;
- DRM_DEBUG_KMS("using pipe a dpll\n");
- } else if (dpll == (I915_READ(PCH_DPLL(1)) & 0x7fffffff) &&
- fp == I915_READ(PCH_FP0(1))) {
- intel_crtc->use_pll_a = false;
- DRM_DEBUG_KMS("using pipe b dpll\n");
- } else {
- DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n");
+ /* CPU eDP is the only output that doesn't need a PCH PLL of its own on
+ * pre-Haswell/LPT generations */
+ if (HAS_PCH_LPT(dev)) {
+ DRM_DEBUG_KMS("LPT detected: no PLL for pipe %d necessary\n",
+ pipe);
+ } else if (!is_cpu_edp) {
+ struct intel_pch_pll *pll;
+
+ pll = intel_get_pch_pll(intel_crtc, dpll, fp);
+ if (pll == NULL) {
+ DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
+ pipe);
return -EINVAL;
}
- }
+ } else
+ intel_put_pch_pll(intel_crtc);
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
* This is an exception to the general rule that mode_set doesn't turn
@@ -5965,22 +4608,11 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
* appropriately here, but we need to look more thoroughly into how
* panels behave in the two modes.
*/
+ temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
- lvds_sync |= LVDS_HSYNC_POLARITY;
+ temp |= LVDS_HSYNC_POLARITY;
if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
- lvds_sync |= LVDS_VSYNC_POLARITY;
- if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
- != lvds_sync) {
- char flags[2] = "-+";
- DRM_INFO("Changing LVDS panel from "
- "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
- flags[!(temp & LVDS_HSYNC_POLARITY)],
- flags[!(temp & LVDS_VSYNC_POLARITY)],
- flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
- flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
- temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
- temp |= lvds_sync;
- }
+ temp |= LVDS_VSYNC_POLARITY;
I915_WRITE(PCH_LVDS, temp);
}
@@ -5990,7 +4622,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
pipeconf |= PIPECONF_DITHER_EN;
pipeconf |= PIPECONF_DITHER_TYPE_SP;
}
- if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+ if (is_dp && !is_cpu_edp) {
intel_dp_set_m_n(crtc, mode, adjusted_mode);
} else {
/* For non-DP output, clear any trans DP clock recovery setting.*/
@@ -6000,13 +4632,11 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(TRANSDPLINK_N1(pipe), 0);
}
- if (!intel_crtc->no_pll &&
- (!has_edp_encoder ||
- intel_encoder_is_pch_edp(&has_edp_encoder->base))) {
- I915_WRITE(PCH_DPLL(pipe), dpll);
+ if (intel_crtc->pch_pll) {
+ I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
/* Wait for the clocks to stabilize. */
- POSTING_READ(PCH_DPLL(pipe));
+ POSTING_READ(intel_crtc->pch_pll->pll_reg);
udelay(150);
/* The pixel multiplier can only be updated once the
@@ -6014,20 +4644,20 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
*
* So write it again.
*/
- I915_WRITE(PCH_DPLL(pipe), dpll);
+ I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
}
intel_crtc->lowfreq_avail = false;
- if (!intel_crtc->no_pll) {
+ if (intel_crtc->pch_pll) {
if (is_lvds && has_reduced_clock && i915_powersave) {
- I915_WRITE(PCH_FP1(pipe), fp2);
+ I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
intel_crtc->lowfreq_avail = true;
if (HAS_PIPE_CXSR(dev)) {
DRM_DEBUG_KMS("enabling CxSR downclocking\n");
pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
}
} else {
- I915_WRITE(PCH_FP1(pipe), fp);
+ I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
if (HAS_PIPE_CXSR(dev)) {
DRM_DEBUG_KMS("disabling CxSR downclocking\n");
pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
@@ -6080,10 +4710,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
- if (has_edp_encoder &&
- !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+ if (is_cpu_edp)
ironlake_set_pll_edp(crtc, adjusted_mode->clock);
- }
I915_WRITE(PIPECONF(pipe), pipeconf);
POSTING_READ(PIPECONF(pipe));
@@ -6097,6 +4725,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
intel_update_watermarks(dev);
+ intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
+
return ret;
}
@@ -6451,7 +5081,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
if (!visible && !intel_crtc->cursor_visible)
return;
- if (IS_IVYBRIDGE(dev)) {
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
I915_WRITE(CURPOS_IVB(pipe), pos);
ivb_update_cursor(crtc, base);
} else {
@@ -6461,9 +5091,6 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
else
i9xx_update_cursor(crtc, base);
}
-
- if (visible)
- intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
}
static int intel_crtc_cursor_set(struct drm_crtc *crtc,
@@ -6987,7 +5614,6 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
drm_mode_set_name(mode);
- drm_mode_set_crtcinfo(mode, 0);
return mode;
}
@@ -7086,7 +5712,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
int pipe = intel_crtc->pipe;
int dpll_reg = DPLL(pipe);
- u32 dpll;
+ int dpll;
DRM_DEBUG_DRIVER("downclocking LVDS\n");
@@ -7100,6 +5726,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
}
+
}
/**
@@ -7158,12 +5785,16 @@ void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return;
- if (!dev_priv->busy)
+ if (!dev_priv->busy) {
+ intel_sanitize_pm(dev);
dev_priv->busy = true;
- else
+ } else
mod_timer(&dev_priv->idle_timer, jiffies +
msecs_to_jiffies(GPU_IDLE_TIMEOUT));
+ if (obj == NULL)
+ return;
+
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (!crtc->fb)
continue;
@@ -7336,18 +5967,19 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
unsigned long offset;
u32 flip_mask;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
- ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
- goto out;
+ goto err;
/* Offset into the new buffer for cases of shared fbs between CRTCs */
offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
- ret = BEGIN_LP_RING(6);
+ ret = intel_ring_begin(ring, 6);
if (ret)
- goto out;
+ goto err_unpin;
/* Can't queue multiple flips, so wait for the previous
* one to finish before executing the next.
@@ -7356,15 +5988,19 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
- OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
- OUT_RING(MI_NOOP);
- OUT_RING(MI_DISPLAY_FLIP |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitches[0]);
- OUT_RING(obj->gtt_offset + offset);
- OUT_RING(0); /* aux display base address, unused */
- ADVANCE_LP_RING();
-out:
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ intel_ring_emit(ring, fb->pitches[0]);
+ intel_ring_emit(ring, obj->gtt_offset + offset);
+ intel_ring_emit(ring, 0); /* aux display base address, unused */
+ intel_ring_advance(ring);
+ return 0;
+
+err_unpin:
+ intel_unpin_fb_obj(obj);
+err:
return ret;
}
@@ -7377,33 +6013,38 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
unsigned long offset;
u32 flip_mask;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
- ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
- goto out;
+ goto err;
/* Offset into the new buffer for cases of shared fbs between CRTCs */
offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
- ret = BEGIN_LP_RING(6);
+ ret = intel_ring_begin(ring, 6);
if (ret)
- goto out;
+ goto err_unpin;
if (intel_crtc->plane)
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
- OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
- OUT_RING(MI_NOOP);
- OUT_RING(MI_DISPLAY_FLIP_I915 |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitches[0]);
- OUT_RING(obj->gtt_offset + offset);
- OUT_RING(MI_NOOP);
-
- ADVANCE_LP_RING();
-out:
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ intel_ring_emit(ring, fb->pitches[0]);
+ intel_ring_emit(ring, obj->gtt_offset + offset);
+ intel_ring_emit(ring, MI_NOOP);
+
+ intel_ring_advance(ring);
+ return 0;
+
+err_unpin:
+ intel_unpin_fb_obj(obj);
+err:
return ret;
}
@@ -7415,24 +6056,25 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
- ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
- goto out;
+ goto err;
- ret = BEGIN_LP_RING(4);
+ ret = intel_ring_begin(ring, 4);
if (ret)
- goto out;
+ goto err_unpin;
/* i965+ uses the linear or tiled offsets from the
* Display Registers (which do not change across a page-flip)
* so we need only reprogram the base address.
*/
- OUT_RING(MI_DISPLAY_FLIP |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitches[0]);
- OUT_RING(obj->gtt_offset | obj->tiling_mode);
+ intel_ring_emit(ring, MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ intel_ring_emit(ring, fb->pitches[0]);
+ intel_ring_emit(ring, obj->gtt_offset | obj->tiling_mode);
/* XXX Enabling the panel-fitter across page-flip is so far
* untested on non-native modes, so ignore it for now.
@@ -7440,9 +6082,13 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
*/
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
- OUT_RING(pf | pipesrc);
- ADVANCE_LP_RING();
-out:
+ intel_ring_emit(ring, pf | pipesrc);
+ intel_ring_advance(ring);
+ return 0;
+
+err_unpin:
+ intel_unpin_fb_obj(obj);
+err:
return ret;
}
@@ -7453,21 +6099,22 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
uint32_t pf, pipesrc;
int ret;
- ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
- goto out;
+ goto err;
- ret = BEGIN_LP_RING(4);
+ ret = intel_ring_begin(ring, 4);
if (ret)
- goto out;
+ goto err_unpin;
- OUT_RING(MI_DISPLAY_FLIP |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitches[0] | obj->tiling_mode);
- OUT_RING(obj->gtt_offset);
+ intel_ring_emit(ring, MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
+ intel_ring_emit(ring, obj->gtt_offset);
/* Contrary to the suggestions in the documentation,
* "Enable Panel Fitter" does not seem to be required when page
@@ -7477,9 +6124,13 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
*/
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
- OUT_RING(pf | pipesrc);
- ADVANCE_LP_RING();
-out:
+ intel_ring_emit(ring, pf | pipesrc);
+ intel_ring_advance(ring);
+ return 0;
+
+err_unpin:
+ intel_unpin_fb_obj(obj);
+err:
return ret;
}
@@ -7501,18 +6152,22 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
- goto out;
+ goto err;
ret = intel_ring_begin(ring, 4);
if (ret)
- goto out;
+ goto err_unpin;
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
intel_ring_emit(ring, (obj->gtt_offset));
intel_ring_emit(ring, (MI_NOOP));
intel_ring_advance(ring);
-out:
+ return 0;
+
+err_unpin:
+ intel_unpin_fb_obj(obj);
+err:
return ret;
}
@@ -7589,6 +6244,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
goto cleanup_pending;
intel_disable_fbc(dev);
+ intel_mark_busy(dev, obj);
mutex_unlock(&dev->struct_mutex);
trace_i915_flip_request(intel_crtc->plane, obj);
@@ -7617,10 +6273,11 @@ static void intel_sanitize_modesetting(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg, val;
+ int i;
/* Clear any frame start delays used for debugging left by the BIOS */
- for_each_pipe(pipe) {
- reg = PIPECONF(pipe);
+ for_each_pipe(i) {
+ reg = PIPECONF(i);
I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
}
@@ -7690,6 +6347,23 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
.page_flip = intel_crtc_page_flip,
};
+static void intel_pch_pll_init(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
+
+ if (dev_priv->num_pch_pll == 0) {
+ DRM_DEBUG_KMS("No PCH PLLs on this hardware, skipping initialisation\n");
+ return;
+ }
+
+ for (i = 0; i < dev_priv->num_pch_pll; i++) {
+ dev_priv->pch_plls[i].pll_reg = _PCH_DPLL(i);
+ dev_priv->pch_plls[i].fp0_reg = _PCH_FP0(i);
+ dev_priv->pch_plls[i].fp1_reg = _PCH_FP1(i);
+ }
+}
+
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -7727,8 +6401,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
intel_crtc->bpp = 24; /* default for pre-Ironlake */
if (HAS_PCH_SPLIT(dev)) {
- if (pipe == 2 && IS_IVYBRIDGE(dev))
- intel_crtc->no_pll = true;
intel_helper_funcs.prepare = ironlake_crtc_prepare;
intel_helper_funcs.commit = ironlake_crtc_commit;
} else {
@@ -7747,15 +6419,12 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
struct drm_mode_object *drmmode_obj;
struct intel_crtc *crtc;
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
DRM_MODE_OBJECT_CRTC);
@@ -7828,12 +6497,31 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_crt_init(dev);
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_HASWELL(dev)) {
+ int found;
+
+ /* Haswell uses DDI functions to detect digital outputs */
+ found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
+ /* DDI A only supports eDP */
+ if (found)
+ intel_ddi_init(dev, PORT_A);
+
+ /* DDI B, C and D detection is indicated by the SFUSE_STRAP
+ * register */
+ found = I915_READ(SFUSE_STRAP);
+
+ if (found & SFUSE_STRAP_DDIB_DETECTED)
+ intel_ddi_init(dev, PORT_B);
+ if (found & SFUSE_STRAP_DDIC_DETECTED)
+ intel_ddi_init(dev, PORT_C);
+ if (found & SFUSE_STRAP_DDID_DETECTED)
+ intel_ddi_init(dev, PORT_D);
+ } else if (HAS_PCH_SPLIT(dev)) {
int found;
if (I915_READ(HDMIB) & PORT_DETECTED) {
/* PCH SDVOB multiplex with HDMIB */
- found = intel_sdvo_init(dev, PCH_SDVOB);
+ found = intel_sdvo_init(dev, PCH_SDVOB, true);
if (!found)
intel_hdmi_init(dev, HDMIB);
if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
@@ -7857,7 +6545,7 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(SDVOB) & SDVO_DETECTED) {
DRM_DEBUG_KMS("probing SDVOB\n");
- found = intel_sdvo_init(dev, SDVOB);
+ found = intel_sdvo_init(dev, SDVOB, true);
if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
intel_hdmi_init(dev, SDVOB);
@@ -7873,7 +6561,7 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(SDVOB) & SDVO_DETECTED) {
DRM_DEBUG_KMS("probing SDVOC\n");
- found = intel_sdvo_init(dev, SDVOC);
+ found = intel_sdvo_init(dev, SDVOC, false);
}
if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
@@ -8002,882 +6690,6 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
.output_poll_changed = intel_fb_output_poll_changed,
};
-static struct drm_i915_gem_object *
-intel_alloc_context_page(struct drm_device *dev)
-{
- struct drm_i915_gem_object *ctx;
- int ret;
-
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
- ctx = i915_gem_alloc_object(dev, 4096);
- if (!ctx) {
- DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
- return NULL;
- }
-
- ret = i915_gem_object_pin(ctx, 4096, true);
- if (ret) {
- DRM_ERROR("failed to pin power context: %d\n", ret);
- goto err_unref;
- }
-
- ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
- if (ret) {
- DRM_ERROR("failed to set-domain on power context: %d\n", ret);
- goto err_unpin;
- }
-
- return ctx;
-
-err_unpin:
- i915_gem_object_unpin(ctx);
-err_unref:
- drm_gem_object_unreference(&ctx->base);
- mutex_unlock(&dev->struct_mutex);
- return NULL;
-}
-
-bool ironlake_set_drps(struct drm_device *dev, u8 val)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u16 rgvswctl;
-
- rgvswctl = I915_READ16(MEMSWCTL);
- if (rgvswctl & MEMCTL_CMD_STS) {
- DRM_DEBUG("gpu busy, RCS change rejected\n");
- return false; /* still busy with another command */
- }
-
- rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
- (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
- I915_WRITE16(MEMSWCTL, rgvswctl);
- POSTING_READ16(MEMSWCTL);
-
- rgvswctl |= MEMCTL_CMD_STS;
- I915_WRITE16(MEMSWCTL, rgvswctl);
-
- return true;
-}
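/*
 * Editorial sketch, not part of this patch: a caller-side view of the DRPS
 * request protocol above. ironlake_set_drps() refuses a request while
 * MEMCTL_CMD_STS is still set from the previous command, so callers clamp
 * the requested point to the fmin..fmax range discovered in
 * ironlake_enable_drps() and retry on the next MEMINT interrupt if the PCU
 * is busy. "wanted" and the function name are hypothetical.
 */
static void example_request_drps_point(struct drm_device *dev, u8 wanted)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 new_delay = clamp_t(u8, wanted, dev_priv->min_delay,
			       dev_priv->max_delay);

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->cur_delay = new_delay;
	/* else: PCU still busy; the MEMINT handler will try again */
}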
-
-void ironlake_enable_drps(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 rgvmodectl = I915_READ(MEMMODECTL);
- u8 fmax, fmin, fstart, vstart;
-
- /* Enable temp reporting */
- I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
- I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
-
- /* 100ms RC evaluation intervals */
- I915_WRITE(RCUPEI, 100000);
- I915_WRITE(RCDNEI, 100000);
-
- /* Set max/min thresholds to 90ms and 80ms respectively */
- I915_WRITE(RCBMAXAVG, 90000);
- I915_WRITE(RCBMINAVG, 80000);
-
- I915_WRITE(MEMIHYST, 1);
-
- /* Set up min, max, and cur for interrupt handling */
- fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
- fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
- fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
- MEMMODE_FSTART_SHIFT;
-
- vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
- PXVFREQ_PX_SHIFT;
-
- dev_priv->fmax = fmax; /* IPS callback will increase this */
- dev_priv->fstart = fstart;
-
- dev_priv->max_delay = fstart;
- dev_priv->min_delay = fmin;
- dev_priv->cur_delay = fstart;
-
- DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
- fmax, fmin, fstart);
-
- I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
-
- /*
- * Interrupts will be enabled in ironlake_irq_postinstall
- */
-
- I915_WRITE(VIDSTART, vstart);
- POSTING_READ(VIDSTART);
-
- rgvmodectl |= MEMMODE_SWMODE_EN;
- I915_WRITE(MEMMODECTL, rgvmodectl);
-
- if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
- DRM_ERROR("stuck trying to change perf mode\n");
- msleep(1);
-
- ironlake_set_drps(dev, fstart);
-
- dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
- I915_READ(0x112e0);
- dev_priv->last_time1 = jiffies_to_msecs(jiffies);
- dev_priv->last_count2 = I915_READ(0x112f4);
- getrawmonotonic(&dev_priv->last_time2);
-}
-
-void ironlake_disable_drps(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u16 rgvswctl = I915_READ16(MEMSWCTL);
-
- /* Ack interrupts, disable EFC interrupt */
- I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
- I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
- I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
- I915_WRITE(DEIIR, DE_PCU_EVENT);
- I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
-
- /* Go back to the starting frequency */
- ironlake_set_drps(dev, dev_priv->fstart);
- msleep(1);
- rgvswctl |= MEMCTL_CMD_STS;
- I915_WRITE(MEMSWCTL, rgvswctl);
- msleep(1);
-
-}
-
-void gen6_set_rps(struct drm_device *dev, u8 val)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 swreq;
-
- swreq = (val & 0x3ff) << 25;
- I915_WRITE(GEN6_RPNSWREQ, swreq);
-}
-
-void gen6_disable_rps(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
- I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
- I915_WRITE(GEN6_PMIER, 0);
- /* Complete PM interrupt masking here doesn't race with the rps work
- * item again unmasking PM interrupts because that is using a different
- * register (PMIMR) to mask PM interrupts. The only risk is in leaving
- * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
-
- spin_lock_irq(&dev_priv->rps_lock);
- dev_priv->pm_iir = 0;
- spin_unlock_irq(&dev_priv->rps_lock);
-
- I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
-}
-
-static unsigned long intel_pxfreq(u32 vidfreq)
-{
- unsigned long freq;
- int div = (vidfreq & 0x3f0000) >> 16;
- int post = (vidfreq & 0x3000) >> 12;
- int pre = (vidfreq & 0x7);
-
- if (!pre)
- return 0;
-
- freq = ((div * 133333) / ((1<<post) * pre));
-
- return freq;
-}
-
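/*
 * Worked example (editorial): for a PXVFREQ value decoding to div = 12,
 * post = 1 and pre = 1, the expression above yields
 * 12 * 133333 / ((1 << 1) * 1) = 799998, i.e. roughly 800 MHz expressed
 * in kHz -- matching the freq / 1000 MHz scaling that intel_init_emon()
 * applies below.
 */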
-void intel_init_emon(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 lcfuse;
- u8 pxw[16];
- int i;
-
- /* Disable to program */
- I915_WRITE(ECR, 0);
- POSTING_READ(ECR);
-
- /* Program energy weights for various events */
- I915_WRITE(SDEW, 0x15040d00);
- I915_WRITE(CSIEW0, 0x007f0000);
- I915_WRITE(CSIEW1, 0x1e220004);
- I915_WRITE(CSIEW2, 0x04000004);
-
- for (i = 0; i < 5; i++)
- I915_WRITE(PEW + (i * 4), 0);
- for (i = 0; i < 3; i++)
- I915_WRITE(DEW + (i * 4), 0);
-
- /* Program P-state weights to account for frequency power adjustment */
- for (i = 0; i < 16; i++) {
- u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
- unsigned long freq = intel_pxfreq(pxvidfreq);
- unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
- PXVFREQ_PX_SHIFT;
- unsigned long val;
-
- val = vid * vid;
- val *= (freq / 1000);
- val *= 255;
- val /= (127*127*900);
- if (val > 0xff)
- DRM_ERROR("bad pxval: %ld\n", val);
- pxw[i] = val;
- }
- /* Render standby states get 0 weight */
- pxw[14] = 0;
- pxw[15] = 0;
-
- for (i = 0; i < 4; i++) {
- u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
- (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
- I915_WRITE(PXW + (i * 4), val);
- }
-
- /* Adjust magic regs to magic values (more experimental results) */
- I915_WRITE(OGW0, 0);
- I915_WRITE(OGW1, 0);
- I915_WRITE(EG0, 0x00007f00);
- I915_WRITE(EG1, 0x0000000e);
- I915_WRITE(EG2, 0x000e0000);
- I915_WRITE(EG3, 0x68000300);
- I915_WRITE(EG4, 0x42000000);
- I915_WRITE(EG5, 0x00140031);
- I915_WRITE(EG6, 0);
- I915_WRITE(EG7, 0);
-
- for (i = 0; i < 8; i++)
- I915_WRITE(PXWL + (i * 4), 0);
-
- /* Enable PMON + select events */
- I915_WRITE(ECR, 0x80000019);
-
- lcfuse = I915_READ(LCFUSE02);
-
- dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
-}
-
-static int intel_enable_rc6(struct drm_device *dev)
-{
- /*
- * Respect the kernel parameter if it is set
- */
- if (i915_enable_rc6 >= 0)
- return i915_enable_rc6;
-
- /*
- * Disable RC6 on Ironlake
- */
- if (INTEL_INFO(dev)->gen == 5)
- return 0;
-
- /*
- * Enable only plain rc6 on Sandybridge; deep rc6 states stay disabled
- */
- if (INTEL_INFO(dev)->gen == 6) {
- DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
- return INTEL_RC6_ENABLE;
- }
- DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
- return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
-}
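/*
 * Editorial note: i915_enable_rc6 is treated as a bitmask of the
 * INTEL_RC6_ENABLE / INTEL_RC6p_ENABLE / INTEL_RC6pp_ENABLE flags tested
 * by gen6_enable_rps() below; assuming those flags are 1, 2 and 4,
 * booting with i915.i915_enable_rc6=3 would force RC6 and deep RC6 on
 * regardless of the per-generation defaults chosen here.
 */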
-
-void gen6_enable_rps(struct drm_i915_private *dev_priv)
-{
- u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
- u32 pcu_mbox, rc6_mask = 0;
- u32 gtfifodbg;
- int cur_freq, min_freq, max_freq;
- int rc6_mode;
- int i;
-
- /* Here begins a magic sequence of register writes to enable
- * auto-downclocking.
- *
- * Perhaps there is some value in exposing these to
- * userspace...
- */
- I915_WRITE(GEN6_RC_STATE, 0);
- mutex_lock(&dev_priv->dev->struct_mutex);
-
- /* Clear the DBG now so we don't confuse earlier errors */
- if ((gtfifodbg = I915_READ(GTFIFODBG))) {
- DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
- I915_WRITE(GTFIFODBG, gtfifodbg);
- }
-
- gen6_gt_force_wake_get(dev_priv);
-
- /* disable the counters and set deterministic thresholds */
- I915_WRITE(GEN6_RC_CONTROL, 0);
-
- I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
- I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
- I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
- I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
- I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
-
- for (i = 0; i < I915_NUM_RINGS; i++)
- I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);
-
- I915_WRITE(GEN6_RC_SLEEP, 0);
- I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
- I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
- I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
- I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
-
- rc6_mode = intel_enable_rc6(dev_priv->dev);
- if (rc6_mode & INTEL_RC6_ENABLE)
- rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
-
- if (rc6_mode & INTEL_RC6p_ENABLE)
- rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
-
- if (rc6_mode & INTEL_RC6pp_ENABLE)
- rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
-
- DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
- (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
- (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
- (rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");
-
- I915_WRITE(GEN6_RC_CONTROL,
- rc6_mask |
- GEN6_RC_CTL_EI_MODE(1) |
- GEN6_RC_CTL_HW_ENABLE);
-
- I915_WRITE(GEN6_RPNSWREQ,
- GEN6_FREQUENCY(10) |
- GEN6_OFFSET(0) |
- GEN6_AGGRESSIVE_TURBO);
- I915_WRITE(GEN6_RC_VIDEO_FREQ,
- GEN6_FREQUENCY(12));
-
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
- 18 << 24 |
- 6 << 16);
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
- I915_WRITE(GEN6_RP_UP_EI, 100000);
- I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
- I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
- I915_WRITE(GEN6_RP_CONTROL,
- GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_CONT);
-
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 500))
- DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
-
- I915_WRITE(GEN6_PCODE_DATA, 0);
- I915_WRITE(GEN6_PCODE_MAILBOX,
- GEN6_PCODE_READY |
- GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 500))
- DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
-
- min_freq = (rp_state_cap & 0xff0000) >> 16;
- max_freq = rp_state_cap & 0xff;
- cur_freq = (gt_perf_status & 0xff00) >> 8;
-
- /* Check for overclock support */
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 500))
- DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
- I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
- pcu_mbox = I915_READ(GEN6_PCODE_DATA);
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 500))
- DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
- if (pcu_mbox & (1<<31)) { /* OC supported */
- max_freq = pcu_mbox & 0xff;
- DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
- }
-
- /* In units of 50MHz, matching the pcu_mbox * 50 scaling above */
- dev_priv->max_delay = max_freq;
- dev_priv->min_delay = min_freq;
- dev_priv->cur_delay = cur_freq;
-
- /* requires MSI enabled */
- I915_WRITE(GEN6_PMIER,
- GEN6_PM_MBOX_EVENT |
- GEN6_PM_THERMAL_EVENT |
- GEN6_PM_RP_DOWN_TIMEOUT |
- GEN6_PM_RP_UP_THRESHOLD |
- GEN6_PM_RP_DOWN_THRESHOLD |
- GEN6_PM_RP_UP_EI_EXPIRED |
- GEN6_PM_RP_DOWN_EI_EXPIRED);
- spin_lock_irq(&dev_priv->rps_lock);
- WARN_ON(dev_priv->pm_iir != 0);
- I915_WRITE(GEN6_PMIMR, 0);
- spin_unlock_irq(&dev_priv->rps_lock);
- /* enable all PM interrupts */
- I915_WRITE(GEN6_PMINTRMSK, 0);
-
- gen6_gt_force_wake_put(dev_priv);
- mutex_unlock(&dev_priv->dev->struct_mutex);
-}
-
-void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
-{
- int min_freq = 15;
- int gpu_freq, ia_freq, max_ia_freq;
- int scaling_factor = 180;
-
- max_ia_freq = cpufreq_quick_get_max(0);
- /*
- * Default to measured freq if none found, PCU will ensure we don't go
- * over
- */
- if (!max_ia_freq)
- max_ia_freq = tsc_khz;
-
- /* Convert from kHz to MHz */
- max_ia_freq /= 1000;
-
- mutex_lock(&dev_priv->dev->struct_mutex);
-
- /*
- * For each potential GPU frequency, load a ring frequency we'd like
- * to use for memory access. We do this by specifying the IA frequency
- * the PCU should use as a reference to determine the ring frequency.
- */
- for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
- gpu_freq--) {
- int diff = dev_priv->max_delay - gpu_freq;
-
- /*
- * For GPU frequencies less than 750MHz, just use the lowest
- * ring freq.
- */
- if (gpu_freq < min_freq)
- ia_freq = 800;
- else
- ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
- ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
-
- I915_WRITE(GEN6_PCODE_DATA,
- (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
- gpu_freq);
- I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
- GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
- GEN6_PCODE_READY) == 0, 10)) {
- DRM_ERROR("pcode write of freq table timed out\n");
- continue;
- }
- }
-
- mutex_unlock(&dev_priv->dev->struct_mutex);
-}
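/*
 * Worked example (editorial): with max_ia_freq = 3400 (MHz) and
 * gpu_freq == dev_priv->max_delay, diff is 0 and the loop above programs
 * an IA ratio of DIV_ROUND_CLOSEST(3400, 100) = 34. Each GPU bin below
 * the maximum then lowers the requested IA reference by 180 / 2 = 90 MHz,
 * until bins below min_freq (15) fall back to the flat 800 MHz floor.
 */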
-
-static void ironlake_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
-
- /* Required for FBC */
- dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
- DPFCRUNIT_CLOCK_GATE_DISABLE |
- DPFDUNIT_CLOCK_GATE_DISABLE;
- /* Required for CxSR */
- dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
-
- I915_WRITE(PCH_3DCGDIS0,
- MARIUNIT_CLOCK_GATE_DISABLE |
- SVSMUNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(PCH_3DCGDIS1,
- VFMUNIT_CLOCK_GATE_DISABLE);
-
- I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
-
- /*
- * According to the spec the following bits should be set in
- * order to enable memory self-refresh:
- * The bit 22/21 of 0x42004
- * The bit 5 of 0x42020
- * The bit 15 of 0x45000
- */
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- (I915_READ(ILK_DISPLAY_CHICKEN2) |
- ILK_DPARB_GATE | ILK_VSDPFD_FULL));
- I915_WRITE(ILK_DSPCLK_GATE,
- (I915_READ(ILK_DSPCLK_GATE) |
- ILK_DPARB_CLK_GATE));
- I915_WRITE(DISP_ARB_CTL,
- (I915_READ(DISP_ARB_CTL) |
- DISP_FBC_WM_DIS));
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
-
- /*
- * Based on the document from hardware guys the following bits
- * should be set unconditionally in order to enable FBC.
- * The bit 22 of 0x42000
- * The bit 22 of 0x42004
- * The bit 7,8,9 of 0x42020.
- */
- if (IS_IRONLAKE_M(dev)) {
- I915_WRITE(ILK_DISPLAY_CHICKEN1,
- I915_READ(ILK_DISPLAY_CHICKEN1) |
- ILK_FBCQ_DIS);
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- I915_READ(ILK_DISPLAY_CHICKEN2) |
- ILK_DPARB_GATE);
- I915_WRITE(ILK_DSPCLK_GATE,
- I915_READ(ILK_DSPCLK_GATE) |
- ILK_DPFC_DIS1 |
- ILK_DPFC_DIS2 |
- ILK_CLK_FBC);
- }
-
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- I915_READ(ILK_DISPLAY_CHICKEN2) |
- ILK_ELPIN_409_SELECT);
- I915_WRITE(_3D_CHICKEN2,
- _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
- _3D_CHICKEN2_WM_READ_PIPELINED);
-}
-
-static void gen6_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe;
- uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
-
- I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
-
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- I915_READ(ILK_DISPLAY_CHICKEN2) |
- ILK_ELPIN_409_SELECT);
-
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
-
- I915_WRITE(GEN6_UCGCTL1,
- I915_READ(GEN6_UCGCTL1) |
- GEN6_BLBUNIT_CLOCK_GATE_DISABLE);
-
- /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
- * gating disable must be set. Failure to set it results in
- * flickering pixels due to Z write ordering failures after
- * some amount of runtime in the Mesa "fire" demo, and Unigine
- * Sanctuary and Tropics, and apparently anything else with
- * alpha test or pixel discard.
- *
- * According to the spec, bit 11 (RCCUNIT) must also be set,
- * but we didn't debug actual testcases to find it out.
- */
- I915_WRITE(GEN6_UCGCTL2,
- GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
- GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
-
- /*
- * According to the spec the following bits should be
- * set in order to enable memory self-refresh and fbc:
- * The bit21 and bit22 of 0x42000
- * The bit21 and bit22 of 0x42004
- * The bit5 and bit7 of 0x42020
- * The bit14 of 0x70180
- * The bit14 of 0x71180
- */
- I915_WRITE(ILK_DISPLAY_CHICKEN1,
- I915_READ(ILK_DISPLAY_CHICKEN1) |
- ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- I915_READ(ILK_DISPLAY_CHICKEN2) |
- ILK_DPARB_GATE | ILK_VSDPFD_FULL);
- I915_WRITE(ILK_DSPCLK_GATE,
- I915_READ(ILK_DSPCLK_GATE) |
- ILK_DPARB_CLK_GATE |
- ILK_DPFD_CLK_GATE);
-
- for_each_pipe(pipe) {
- I915_WRITE(DSPCNTR(pipe),
- I915_READ(DSPCNTR(pipe)) |
- DISPPLANE_TRICKLE_FEED_DISABLE);
- intel_flush_display_plane(dev_priv, pipe);
- }
-}
-
-static void ivybridge_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe;
- uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
-
- I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
-
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
-
- /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
- * This implements the WaDisableRCZUnitClockGating workaround.
- */
- I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
-
- I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
-
- I915_WRITE(IVB_CHICKEN3,
- CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
- CHICKEN3_DGMG_DONE_FIX_DISABLE);
-
- /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
- I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
- GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
-
- /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
- I915_WRITE(GEN7_L3CNTLREG1,
- GEN7_WA_FOR_GEN7_L3_CONTROL);
- I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
- GEN7_WA_L3_CHICKEN_MODE);
-
- /* This is required by WaCatErrorRejectionIssue */
- I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
- I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
- GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
-
- for_each_pipe(pipe) {
- I915_WRITE(DSPCNTR(pipe),
- I915_READ(DSPCNTR(pipe)) |
- DISPPLANE_TRICKLE_FEED_DISABLE);
- intel_flush_display_plane(dev_priv, pipe);
- }
-}
-
-static void g4x_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dspclk_gate;
-
- I915_WRITE(RENCLK_GATE_D1, 0);
- I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
- GS_UNIT_CLOCK_GATE_DISABLE |
- CL_UNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(RAMCLK_GATE_D, 0);
- dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
- OVRUNIT_CLOCK_GATE_DISABLE |
- OVCUNIT_CLOCK_GATE_DISABLE;
- if (IS_GM45(dev))
- dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
-}
-
-static void crestline_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
- I915_WRITE(RENCLK_GATE_D2, 0);
- I915_WRITE(DSPCLK_GATE_D, 0);
- I915_WRITE(RAMCLK_GATE_D, 0);
- I915_WRITE16(DEUC, 0);
-}
-
-static void broadwater_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
- I965_RCC_CLOCK_GATE_DISABLE |
- I965_RCPB_CLOCK_GATE_DISABLE |
- I965_ISC_CLOCK_GATE_DISABLE |
- I965_FBC_CLOCK_GATE_DISABLE);
- I915_WRITE(RENCLK_GATE_D2, 0);
-}
-
-static void gen3_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dstate = I915_READ(D_STATE);
-
- dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
- DSTATE_DOT_CLOCK_GATING;
- I915_WRITE(D_STATE, dstate);
-}
-
-static void i85x_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
-}
-
-static void i830_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
-}
-
-static void ibx_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- /*
- * On Ibex Peak and Cougar Point, we need to disable clock
- * gating for the panel power sequencer or it will fail to
- * start up when no ports are active.
- */
- I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
-}
-
-static void cpt_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe;
-
- /*
- * On Ibex Peak and Cougar Point, we need to disable clock
- * gating for the panel power sequencer or it will fail to
- * start up when no ports are active.
- */
- I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
- DPLS_EDP_PPS_FIX_DIS);
- /* Without this, mode sets may fail silently on FDI */
- for_each_pipe(pipe)
- I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
-}
-
-static void ironlake_teardown_rc6(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->renderctx) {
- i915_gem_object_unpin(dev_priv->renderctx);
- drm_gem_object_unreference(&dev_priv->renderctx->base);
- dev_priv->renderctx = NULL;
- }
-
- if (dev_priv->pwrctx) {
- i915_gem_object_unpin(dev_priv->pwrctx);
- drm_gem_object_unreference(&dev_priv->pwrctx->base);
- dev_priv->pwrctx = NULL;
- }
-}
-
-static void ironlake_disable_rc6(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (I915_READ(PWRCTXA)) {
- /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
- I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
- wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
- 50);
-
- I915_WRITE(PWRCTXA, 0);
- POSTING_READ(PWRCTXA);
-
- I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
- POSTING_READ(RSTDBYCTL);
- }
-
- ironlake_teardown_rc6(dev);
-}
-
-static int ironlake_setup_rc6(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->renderctx == NULL)
- dev_priv->renderctx = intel_alloc_context_page(dev);
- if (!dev_priv->renderctx)
- return -ENOMEM;
-
- if (dev_priv->pwrctx == NULL)
- dev_priv->pwrctx = intel_alloc_context_page(dev);
- if (!dev_priv->pwrctx) {
- ironlake_teardown_rc6(dev);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-void ironlake_enable_rc6(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
-
- /* rc6 disabled by default due to repeated reports of hanging during
- * boot and resume.
- */
- if (!intel_enable_rc6(dev))
- return;
-
- mutex_lock(&dev->struct_mutex);
- ret = ironlake_setup_rc6(dev);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return;
- }
-
- /*
- * GPU can automatically power down the render unit if given a page
- * to save state.
- */
- ret = BEGIN_LP_RING(6);
- if (ret) {
- ironlake_teardown_rc6(dev);
- mutex_unlock(&dev->struct_mutex);
- return;
- }
-
- OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
- OUT_RING(MI_SET_CONTEXT);
- OUT_RING(dev_priv->renderctx->gtt_offset |
- MI_MM_SPACE_GTT |
- MI_SAVE_EXT_STATE_EN |
- MI_RESTORE_EXT_STATE_EN |
- MI_RESTORE_INHIBIT);
- OUT_RING(MI_SUSPEND_FLUSH);
- OUT_RING(MI_NOOP);
- OUT_RING(MI_FLUSH);
- ADVANCE_LP_RING();
-
- /*
- * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
- * does an implicit flush; combined with the MI_FLUSH above, it should
- * be safe to assume that renderctx is valid.
- */
- ret = intel_wait_ring_idle(LP_RING(dev_priv));
- if (ret) {
- DRM_ERROR("failed to enable ironlake power power savings\n");
- ironlake_teardown_rc6(dev);
- mutex_unlock(&dev->struct_mutex);
- return;
- }
-
- I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
- I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
- mutex_unlock(&dev->struct_mutex);
-}
-
-void intel_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- dev_priv->display.init_clock_gating(dev);
-
- if (dev_priv->display.init_pch_clock_gating)
- dev_priv->display.init_pch_clock_gating(dev);
-}
-
/* Set up chip specific display functions */
static void intel_init_display(struct drm_device *dev)
{
@@ -8887,32 +6699,20 @@ static void intel_init_display(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.dpms = ironlake_crtc_dpms;
dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
+ dev_priv->display.off = ironlake_crtc_off;
dev_priv->display.update_plane = ironlake_update_plane;
} else {
dev_priv->display.dpms = i9xx_crtc_dpms;
dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
+ dev_priv->display.off = i9xx_crtc_off;
dev_priv->display.update_plane = i9xx_update_plane;
}
- if (I915_HAS_FBC(dev)) {
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
- dev_priv->display.enable_fbc = ironlake_enable_fbc;
- dev_priv->display.disable_fbc = ironlake_disable_fbc;
- } else if (IS_GM45(dev)) {
- dev_priv->display.fbc_enabled = g4x_fbc_enabled;
- dev_priv->display.enable_fbc = g4x_enable_fbc;
- dev_priv->display.disable_fbc = g4x_disable_fbc;
- } else if (IS_CRESTLINE(dev)) {
- dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
- dev_priv->display.enable_fbc = i8xx_enable_fbc;
- dev_priv->display.disable_fbc = i8xx_disable_fbc;
- }
- /* 855GM needs testing */
- }
-
/* Returns the core display clock speed */
- if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
+ if (IS_VALLEYVIEW(dev))
+ dev_priv->display.get_display_clock_speed =
+ valleyview_get_display_clock_speed;
+ else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
dev_priv->display.get_display_clock_speed =
i945_get_display_clock_speed;
else if (IS_I915G(dev))
@@ -8934,124 +6734,27 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.get_display_clock_speed =
i830_get_display_clock_speed;
- /* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
- dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
-
- /* IVB configs may use multi-threaded forcewake */
- if (IS_IVYBRIDGE(dev)) {
- u32 ecobus;
-
- /* A small trick here - if the bios hasn't configured MT forcewake,
- * and if the device is in RC6, then force_wake_mt_get will not wake
- * the device and the ECOBUS read will return zero, which will be
- * (correctly) interpreted by the test below as MT forcewake being
- * disabled.
- */
- mutex_lock(&dev->struct_mutex);
- __gen6_gt_force_wake_mt_get(dev_priv);
- ecobus = I915_READ_NOTRACE(ECOBUS);
- __gen6_gt_force_wake_mt_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
-
- if (ecobus & FORCEWAKE_MT_ENABLE) {
- DRM_DEBUG_KMS("Using MT version of forcewake\n");
- dev_priv->display.force_wake_get =
- __gen6_gt_force_wake_mt_get;
- dev_priv->display.force_wake_put =
- __gen6_gt_force_wake_mt_put;
- }
- }
-
- if (HAS_PCH_IBX(dev))
- dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
- else if (HAS_PCH_CPT(dev))
- dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
-
if (IS_GEN5(dev)) {
- if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
- dev_priv->display.update_wm = ironlake_update_wm;
- else {
- DRM_DEBUG_KMS("Failed to get proper latency. "
- "Disable CxSR\n");
- dev_priv->display.update_wm = NULL;
- }
dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
- dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
dev_priv->display.write_eld = ironlake_write_eld;
} else if (IS_GEN6(dev)) {
- if (SNB_READ_WM0_LATENCY()) {
- dev_priv->display.update_wm = sandybridge_update_wm;
- dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
- } else {
- DRM_DEBUG_KMS("Failed to read display plane latency. "
- "Disable CxSR\n");
- dev_priv->display.update_wm = NULL;
- }
dev_priv->display.fdi_link_train = gen6_fdi_link_train;
- dev_priv->display.init_clock_gating = gen6_init_clock_gating;
dev_priv->display.write_eld = ironlake_write_eld;
} else if (IS_IVYBRIDGE(dev)) {
/* FIXME: detect B0+ stepping and use auto training */
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
- if (SNB_READ_WM0_LATENCY()) {
- dev_priv->display.update_wm = sandybridge_update_wm;
- dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
- } else {
- DRM_DEBUG_KMS("Failed to read display plane latency. "
- "Disable CxSR\n");
- dev_priv->display.update_wm = NULL;
- }
- dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
+ dev_priv->display.write_eld = ironlake_write_eld;
+ } else if (IS_HASWELL(dev)) {
+ dev_priv->display.fdi_link_train = hsw_fdi_link_train;
dev_priv->display.write_eld = ironlake_write_eld;
} else
dev_priv->display.update_wm = NULL;
- } else if (IS_PINEVIEW(dev)) {
- if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
- dev_priv->is_ddr3,
- dev_priv->fsb_freq,
- dev_priv->mem_freq)) {
- DRM_INFO("failed to find known CxSR latency "
- "(found ddr%s fsb freq %d, mem freq %d), "
- "disabling CxSR\n",
- (dev_priv->is_ddr3 == 1) ? "3" : "2",
- dev_priv->fsb_freq, dev_priv->mem_freq);
- /* Disable CxSR and never update its watermark again */
- pineview_disable_cxsr(dev);
- dev_priv->display.update_wm = NULL;
- } else
- dev_priv->display.update_wm = pineview_update_wm;
- dev_priv->display.init_clock_gating = gen3_init_clock_gating;
+ } else if (IS_VALLEYVIEW(dev)) {
+ dev_priv->display.force_wake_get = vlv_force_wake_get;
+ dev_priv->display.force_wake_put = vlv_force_wake_put;
} else if (IS_G4X(dev)) {
dev_priv->display.write_eld = g4x_write_eld;
- dev_priv->display.update_wm = g4x_update_wm;
- dev_priv->display.init_clock_gating = g4x_init_clock_gating;
- } else if (IS_GEN4(dev)) {
- dev_priv->display.update_wm = i965_update_wm;
- if (IS_CRESTLINE(dev))
- dev_priv->display.init_clock_gating = crestline_init_clock_gating;
- else if (IS_BROADWATER(dev))
- dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
- } else if (IS_GEN3(dev)) {
- dev_priv->display.update_wm = i9xx_update_wm;
- dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
- dev_priv->display.init_clock_gating = gen3_init_clock_gating;
- } else if (IS_I865G(dev)) {
- dev_priv->display.update_wm = i830_update_wm;
- dev_priv->display.init_clock_gating = i85x_init_clock_gating;
- dev_priv->display.get_fifo_size = i830_get_fifo_size;
- } else if (IS_I85X(dev)) {
- dev_priv->display.update_wm = i9xx_update_wm;
- dev_priv->display.get_fifo_size = i85x_get_fifo_size;
- dev_priv->display.init_clock_gating = i85x_init_clock_gating;
- } else {
- dev_priv->display.update_wm = i830_update_wm;
- dev_priv->display.init_clock_gating = i830_init_clock_gating;
- if (IS_845G(dev))
- dev_priv->display.get_fifo_size = i845_get_fifo_size;
- else
- dev_priv->display.get_fifo_size = i830_get_fifo_size;
}
/* Default just returns -ENODEV to indicate unsupported */
@@ -9090,7 +6793,7 @@ static void quirk_pipea_force(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->quirks |= QUIRK_PIPEA_FORCE;
- DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
+ DRM_INFO("applying pipe a force quirk\n");
}
/*
@@ -9100,6 +6803,18 @@ static void quirk_ssc_force_disable(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
+ DRM_INFO("applying lvds SSC disable quirk\n");
+}
+
+/*
+ * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
+ * brightness value
+ */
+static void quirk_invert_brightness(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
+ DRM_INFO("applying inverted panel brightness quirk\n");
}
struct intel_quirk {
@@ -9109,7 +6824,7 @@ struct intel_quirk {
void (*hook)(struct drm_device *dev);
};
-struct intel_quirk intel_quirks[] = {
+static struct intel_quirk intel_quirks[] = {
/* HP Mini needs pipe A force quirk (LP: #322104) */
{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
@@ -9134,6 +6849,9 @@ struct intel_quirk intel_quirks[] = {
/* Sony Vaio Y cannot use SSC on LVDS */
{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
+
+ /* Acer Aspire 5734Z must invert backlight brightness */
+ { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
};
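/*
 * Editorial sketch: each entry matches on PCI device id, subsystem vendor
 * and subsystem device. Assuming intel_init_quirks() treats PCI_ANY_ID as
 * a wildcard for the subsystem fields, a hypothetical catch-all entry for
 * a single device id would look like:
 *
 *	{ 0x2a42, PCI_ANY_ID, PCI_ANY_ID, quirk_invert_brightness },
 */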
static void intel_init_quirks(struct drm_device *dev)
@@ -9166,7 +6884,7 @@ static void i915_disable_vga(struct drm_device *dev)
vga_reg = VGACNTRL;
vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
- outb(1, VGA_SR_INDEX);
+ outb(SR01, VGA_SR_INDEX);
sr1 = inb(VGA_SR_DATA);
outb(sr1 | 1<<5, VGA_SR_DATA);
vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
@@ -9176,6 +6894,40 @@ static void i915_disable_vga(struct drm_device *dev)
POSTING_READ(vga_reg);
}
+static void ivb_pch_pwm_override(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /*
+ * IVB has CPU eDP backlight regs too, set things up to let the
+ * PCH regs control the backlight
+ */
+ I915_WRITE(BLC_PWM_CPU_CTL2, PWM_ENABLE);
+ I915_WRITE(BLC_PWM_CPU_CTL, 0);
+ I915_WRITE(BLC_PWM_PCH_CTL1, PWM_ENABLE | (1<<30));
+}
+
+void intel_modeset_init_hw(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ intel_init_clock_gating(dev);
+
+ if (IS_IRONLAKE_M(dev)) {
+ ironlake_enable_drps(dev);
+ ironlake_enable_rc6(dev);
+ intel_init_emon(dev);
+ }
+
+ if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
+ gen6_enable_rps(dev_priv);
+ gen6_update_ring_freq(dev_priv);
+ }
+
+ if (IS_IVYBRIDGE(dev))
+ ivb_pch_pwm_override(dev);
+}
+
void intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -9189,10 +6941,14 @@ void intel_modeset_init(struct drm_device *dev)
dev->mode_config.preferred_depth = 24;
dev->mode_config.prefer_shadow = 1;
- dev->mode_config.funcs = (void *)&intel_mode_funcs;
+ dev->mode_config.funcs = &intel_mode_funcs;
intel_init_quirks(dev);
+ intel_init_pm(dev);
+
+ intel_prepare_ddi(dev);
+
intel_init_display(dev);
if (IS_GEN2(dev)) {
@@ -9217,22 +6973,12 @@ void intel_modeset_init(struct drm_device *dev)
DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
}
+ intel_pch_pll_init(dev);
+
/* Just disable it once at startup */
i915_disable_vga(dev);
intel_setup_outputs(dev);
- intel_init_clock_gating(dev);
-
- if (IS_IRONLAKE_M(dev)) {
- ironlake_enable_drps(dev);
- intel_init_emon(dev);
- }
-
- if (IS_GEN6(dev) || IS_GEN7(dev)) {
- gen6_enable_rps(dev_priv);
- gen6_update_ring_freq(dev_priv);
- }
-
INIT_WORK(&dev_priv->idle_work, intel_idle_update);
setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
(unsigned long)dev);
@@ -9240,8 +6986,7 @@ void intel_modeset_init(struct drm_device *dev)
void intel_modeset_gem_init(struct drm_device *dev)
{
- if (IS_IRONLAKE_M(dev))
- ironlake_enable_rc6(dev);
+ intel_modeset_init_hw(dev);
intel_setup_overlay(dev);
}
@@ -9271,12 +7016,15 @@ void intel_modeset_cleanup(struct drm_device *dev)
if (IS_IRONLAKE_M(dev))
ironlake_disable_drps(dev);
- if (IS_GEN6(dev) || IS_GEN7(dev))
+ if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev))
gen6_disable_rps(dev);
if (IS_IRONLAKE_M(dev))
ironlake_disable_rc6(dev);
+ if (IS_VALLEYVIEW(dev))
+ vlv_init_dpio(dev);
+
mutex_unlock(&dev->struct_mutex);
/* Disable the irq before mode object teardown, for the irq might
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 4b637919f74..71c7096e386 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -688,7 +688,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
int lane_count, clock;
int max_lane_count = intel_dp_max_lane_count(intel_dp);
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
- int bpp;
+ int bpp, mode_rate;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
@@ -702,24 +702,30 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
mode->clock = intel_dp->panel_fixed_mode->clock;
}
+ DRM_DEBUG_KMS("DP link computation with max lane count %i "
+ "max bw %02x pixel clock %iKHz\n",
+ max_lane_count, bws[max_clock], mode->clock);
+
if (!intel_dp_adjust_dithering(intel_dp, mode, adjusted_mode))
return false;
bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
+ mode_rate = intel_dp_link_required(mode->clock, bpp);
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
- if (intel_dp_link_required(mode->clock, bpp)
- <= link_avail) {
+ if (mode_rate <= link_avail) {
intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count;
adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
- DRM_DEBUG_KMS("Display port link bw %02x lane "
- "count %d clock %d\n",
+ DRM_DEBUG_KMS("DP link bw %02x lane "
+ "count %d clock %d bpp %d\n",
intel_dp->link_bw, intel_dp->lane_count,
- adjusted_mode->clock);
+ adjusted_mode->clock, bpp);
+ DRM_DEBUG_KMS("DP link bw required %i available %i\n",
+ mode_rate, link_avail);
return true;
}
}
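/*
 * Worked example (editorial), assuming intel_dp_link_required() returns
 * pixel_clock * bpp / 10 and intel_dp_max_data_rate() applies the 8b/10b
 * coding factor, as elsewhere in this era of the driver: a 148500 kHz
 * (1080p60) mode at 24 bpp needs mode_rate = 148500 * 24 / 10 = 356400,
 * while two lanes at 2.7 GHz offer 270000 * 2 * 8 / 10 = 432000, so the
 * loops above settle on DP_LINK_BW_2_7 with lane_count = 2.
 */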
@@ -1149,6 +1155,7 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("Turn eDP power off\n");
WARN(intel_dp->want_panel_vdd, "Cannot turn power off while VDD is on\n");
+ ironlake_panel_vdd_off_sync(intel_dp); /* finish any pending work */
pp = ironlake_get_pp_control(dev_priv);
pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
@@ -1954,6 +1961,23 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
return false;
}
+static void
+intel_dp_probe_oui(struct intel_dp *intel_dp)
+{
+ u8 buf[3];
+
+ if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
+ return;
+
+ if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
+ DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
+ buf[0], buf[1], buf[2]);
+
+ if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
+ DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
+ buf[0], buf[1], buf[2]);
+}
+
static bool
intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
@@ -2137,6 +2161,8 @@ intel_dp_detect(struct drm_connector *connector, bool force)
if (status != connector_status_connected)
return status;
+ intel_dp_probe_oui(intel_dp);
+
if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
} else {
@@ -2438,6 +2464,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
}
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
@@ -2483,6 +2510,13 @@ intel_dp_init(struct drm_device *dev, int output_reg)
pp_off = I915_READ(PCH_PP_OFF_DELAYS);
pp_div = I915_READ(PCH_PP_DIVISOR);
+ if (!pp_on || !pp_off || !pp_div) {
+ DRM_INFO("bad panel power sequencing delays, disabling panel\n");
+ intel_dp_encoder_destroy(&intel_dp->base.base);
+ intel_dp_destroy(&intel_connector->base);
+ return;
+ }
+
/* Pull timing values out of registers */
cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
PANEL_POWER_UP_DELAY_SHIFT;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 715afa15302..3e0918834e7 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -45,6 +45,18 @@
ret__; \
})
+#define wait_for_atomic_us(COND, US) ({ \
+ int i, ret__ = -ETIMEDOUT; \
+ for (i = 0; i < (US); i++) { \
+ if ((COND)) { \
+ ret__ = 0; \
+ break; \
+ } \
+ udelay(1); \
+ } \
+ ret__; \
+})
+
#define wait_for(COND, MS) _wait_for(COND, MS, 1)
#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
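/*
 * Editorial sketch: wait_for_atomic_us() polls COND once per microsecond
 * via udelay(1), giving microsecond-granularity timeouts in atomic
 * context where the millisecond-based wait_for_atomic() is too coarse.
 * The register and bit below are hypothetical, for illustration only:
 *
 *	if (wait_for_atomic_us(I915_READ(EXAMPLE_STATUS_REG) &
 *			       EXAMPLE_READY_BIT, 100))
 *		DRM_ERROR("example status bit not set within 100us\n");
 */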
@@ -171,8 +183,8 @@ struct intel_crtc {
bool cursor_visible;
unsigned int bpp;
- bool no_pll; /* tertiary pipe for IVB */
- bool use_pll_a;
+ /* We can share PLLs across outputs if the timings match */
+ struct intel_pch_pll *pch_pll;
};
struct intel_plane {
@@ -196,6 +208,25 @@ struct intel_plane {
struct drm_intel_sprite_colorkey *key);
};
+struct intel_watermark_params {
+ unsigned long fifo_size;
+ unsigned long max_wm;
+ unsigned long default_wm;
+ unsigned long guard_size;
+ unsigned long cacheline_size;
+};
+
+struct cxsr_latency {
+ int is_desktop;
+ int is_ddr3;
+ unsigned long fsb_freq;
+ unsigned long mem_freq;
+ unsigned long display_sr;
+ unsigned long display_hpll_disable;
+ unsigned long cursor_sr;
+ unsigned long cursor_hpll_disable;
+};
+
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
#define to_intel_connector(x) container_of(x, struct intel_connector, base)
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
@@ -207,6 +238,8 @@ struct intel_plane {
#define DIP_TYPE_AVI 0x82
#define DIP_VERSION_AVI 0x2
#define DIP_LEN_AVI 13
+#define DIP_AVI_PR_1 0
+#define DIP_AVI_PR_2 1
#define DIP_TYPE_SPD 0x83
#define DIP_VERSION_SPD 0x1
@@ -240,23 +273,36 @@ struct dip_infoframe {
uint8_t ITC_EC_Q_SC;
/* PB4 - VIC 6:0 */
uint8_t VIC;
- /* PB5 - PR 3:0 */
- uint8_t PR;
+ /* PB5 - YQ 7:6, CN 5:4, PR 3:0 */
+ uint8_t YQ_CN_PR;
/* PB6 to PB13 */
uint16_t top_bar_end;
uint16_t bottom_bar_start;
uint16_t left_bar_end;
uint16_t right_bar_start;
- } avi;
+ } __attribute__ ((packed)) avi;
struct {
uint8_t vn[8];
uint8_t pd[16];
uint8_t sdi;
- } spd;
+ } __attribute__ ((packed)) spd;
uint8_t payload[27];
} __attribute__ ((packed)) body;
} __attribute__((packed));
+struct intel_hdmi {
+ struct intel_encoder base;
+ u32 sdvox_reg;
+ int ddc_bus;
+ int ddi_port;
+ uint32_t color_range;
+ bool has_hdmi_sink;
+ bool has_audio;
+ enum hdmi_force_audio force_audio;
+ void (*write_infoframe)(struct drm_encoder *encoder,
+ struct dip_infoframe *frame);
+};
+
static inline struct drm_crtc *
intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
{
@@ -296,8 +342,13 @@ extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector)
extern void intel_crt_init(struct drm_device *dev);
extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
-void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
-extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
+extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
+extern void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode);
+extern void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder);
+extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
+extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
+ bool is_sdvob);
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);
extern void intel_mark_busy(struct drm_device *dev,
@@ -311,6 +362,10 @@ extern bool intel_dpd_is_edp(struct drm_device *dev);
extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
+extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
+ enum plane plane);
+
+void intel_sanitize_pm(struct drm_device *dev);
/* intel_panel.c */
extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
@@ -368,12 +423,9 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
extern void intel_enable_clock_gating(struct drm_device *dev);
+extern void ironlake_disable_rc6(struct drm_device *dev);
extern void ironlake_enable_drps(struct drm_device *dev);
extern void ironlake_disable_drps(struct drm_device *dev);
-extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
-extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
-extern void gen6_disable_rps(struct drm_device *dev);
-extern void intel_init_emon(struct drm_device *dev);
extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
@@ -411,16 +463,43 @@ extern void intel_init_clock_gating(struct drm_device *dev);
extern void intel_write_eld(struct drm_encoder *encoder,
struct drm_display_mode *mode);
extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe);
+extern void intel_prepare_ddi(struct drm_device *dev);
+extern void hsw_fdi_link_train(struct drm_crtc *crtc);
+extern void intel_ddi_init(struct drm_device *dev, enum port port);
/* For use by IVB LP watermark workaround in intel_sprite.c */
-extern void sandybridge_update_wm(struct drm_device *dev);
+extern void intel_update_watermarks(struct drm_device *dev);
extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
uint32_t sprite_width,
int pixel_size);
+extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe,
+ struct drm_display_mode *mode);
extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+extern u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg);
+
+/* Power-related functions, located in intel_pm.c */
+extern void intel_init_pm(struct drm_device *dev);
+/* FBC */
+extern bool intel_fbc_enabled(struct drm_device *dev);
+extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
+extern void intel_update_fbc(struct drm_device *dev);
+/* IPS */
+extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
+extern void intel_gpu_ips_teardown(void);
+
+extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
+extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
+extern void gen6_disable_rps(struct drm_device *dev);
+extern void intel_init_emon(struct drm_device *dev);
+
+extern void intel_ddi_dpms(struct drm_encoder *encoder, int mode);
+extern void intel_ddi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 020a7d7f744..60ba50b956f 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -243,7 +243,7 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
* that's not the case.
*/
intel_ddc_get_modes(connector,
- &dev_priv->gmbus[GMBUS_PORT_DPC].adapter);
+ intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPC));
if (!list_empty(&connector->probed_modes))
return 1;
@@ -375,7 +375,7 @@ void intel_dvo_init(struct drm_device *dev)
* special cases, but otherwise default to what's defined
* in the spec.
*/
- if (dvo->gpio != 0)
+ if (intel_gmbus_is_port_valid(dvo->gpio))
gpio = dvo->gpio;
else if (dvo->type == INTEL_DVO_CHIP_LVDS)
gpio = GMBUS_PORT_SSC;
@@ -386,7 +386,7 @@ void intel_dvo_init(struct drm_device *dev)
* It appears that everything is on GPIOE except for panels
* on i830 laptops, which are on GPIOB (DVOA).
*/
- i2c = &dev_priv->gmbus[gpio].adapter;
+ i2c = intel_gmbus_get_adapter(dev_priv, gpio);
intel_dvo->dev = *dvo;
if (!dvo->dev_ops->init(&intel_dvo->dev, i2c))
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 6e9ee33fd41..bf8690720a0 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -94,7 +94,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
mutex_lock(&dev->struct_mutex);
/* Flush everything out, we'll be doing GTT only from now on */
- ret = intel_pin_and_fence_fb_obj(dev, obj, false);
+ ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
if (ret) {
DRM_ERROR("failed to pin fb: %d\n", ret);
goto out_unref;
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 2d7f47b56b6..2ead3bf7c21 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -37,19 +37,7 @@
#include "i915_drm.h"
#include "i915_drv.h"
-struct intel_hdmi {
- struct intel_encoder base;
- u32 sdvox_reg;
- int ddc_bus;
- uint32_t color_range;
- bool has_hdmi_sink;
- bool has_audio;
- enum hdmi_force_audio force_audio;
- void (*write_infoframe)(struct drm_encoder *encoder,
- struct dip_infoframe *frame);
-};
-
-static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
+struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
{
return container_of(encoder, struct intel_hdmi, base.base);
}
@@ -75,108 +63,246 @@ void intel_dip_infoframe_csum(struct dip_infoframe *frame)
frame->checksum = 0x100 - sum;
}
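/*
 * Worked example (editorial): the checksum is chosen so that the header
 * plus payload bytes sum to zero modulo 256. If the other bytes accumulate
 * to 0xa7 in the u8 sum, then checksum = 0x100 - 0xa7 = 0x59, and
 * (0xa7 + 0x59) & 0xff == 0, which is what HDMI sinks verify.
 */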
-static u32 intel_infoframe_index(struct dip_infoframe *frame)
+static u32 g4x_infoframe_index(struct dip_infoframe *frame)
{
- u32 flags = 0;
-
switch (frame->type) {
case DIP_TYPE_AVI:
- flags |= VIDEO_DIP_SELECT_AVI;
- break;
+ return VIDEO_DIP_SELECT_AVI;
case DIP_TYPE_SPD:
- flags |= VIDEO_DIP_SELECT_SPD;
- break;
+ return VIDEO_DIP_SELECT_SPD;
default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
- break;
+ return 0;
}
-
- return flags;
}
-static u32 intel_infoframe_flags(struct dip_infoframe *frame)
+static u32 g4x_infoframe_enable(struct dip_infoframe *frame)
{
- u32 flags = 0;
+ switch (frame->type) {
+ case DIP_TYPE_AVI:
+ return VIDEO_DIP_ENABLE_AVI;
+ case DIP_TYPE_SPD:
+ return VIDEO_DIP_ENABLE_SPD;
+ default:
+ DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+ return 0;
+ }
+}
+static u32 hsw_infoframe_enable(struct dip_infoframe *frame)
+{
switch (frame->type) {
case DIP_TYPE_AVI:
- flags |= VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_FREQ_VSYNC;
- break;
+ return VIDEO_DIP_ENABLE_AVI_HSW;
case DIP_TYPE_SPD:
- flags |= VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_FREQ_VSYNC;
- break;
+ return VIDEO_DIP_ENABLE_SPD_HSW;
default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
- break;
+ return 0;
}
+}
- return flags;
+static u32 hsw_infoframe_data_reg(struct dip_infoframe *frame, enum pipe pipe)
+{
+ switch (frame->type) {
+ case DIP_TYPE_AVI:
+ return HSW_TVIDEO_DIP_AVI_DATA(pipe);
+ case DIP_TYPE_SPD:
+ return HSW_TVIDEO_DIP_SPD_DATA(pipe);
+ default:
+ DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+ return 0;
+ }
}
-static void i9xx_write_infoframe(struct drm_encoder *encoder,
- struct dip_infoframe *frame)
+static void g4x_write_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- u32 port, flags, val = I915_READ(VIDEO_DIP_CTL);
+ u32 val = I915_READ(VIDEO_DIP_CTL);
unsigned i, len = DIP_HEADER_SIZE + frame->len;
-
- /* XXX first guess at handling video port, is this correct? */
+ val &= ~VIDEO_DIP_PORT_MASK;
if (intel_hdmi->sdvox_reg == SDVOB)
- port = VIDEO_DIP_PORT_B;
+ val |= VIDEO_DIP_PORT_B;
else if (intel_hdmi->sdvox_reg == SDVOC)
- port = VIDEO_DIP_PORT_C;
+ val |= VIDEO_DIP_PORT_C;
else
return;
- flags = intel_infoframe_index(frame);
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(frame);
- val &= ~VIDEO_DIP_SELECT_MASK;
+ val &= ~g4x_infoframe_enable(frame);
+ val |= VIDEO_DIP_ENABLE;
- I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
+ I915_WRITE(VIDEO_DIP_CTL, val);
for (i = 0; i < len; i += 4) {
I915_WRITE(VIDEO_DIP_DATA, *data);
data++;
}
- flags |= intel_infoframe_flags(frame);
+ val |= g4x_infoframe_enable(frame);
+ val &= ~VIDEO_DIP_FREQ_MASK;
+ val |= VIDEO_DIP_FREQ_VSYNC;
- I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
+ I915_WRITE(VIDEO_DIP_CTL, val);
}
-static void ironlake_write_infoframe(struct drm_encoder *encoder,
- struct dip_infoframe *frame)
+static void ibx_write_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = encoder->crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
unsigned i, len = DIP_HEADER_SIZE + frame->len;
- u32 flags, val = I915_READ(reg);
+ u32 val = I915_READ(reg);
+
+ val &= ~VIDEO_DIP_PORT_MASK;
+ switch (intel_hdmi->sdvox_reg) {
+ case HDMIB:
+ val |= VIDEO_DIP_PORT_B;
+ break;
+ case HDMIC:
+ val |= VIDEO_DIP_PORT_C;
+ break;
+ case HDMID:
+ val |= VIDEO_DIP_PORT_D;
+ break;
+ default:
+ return;
+ }
intel_wait_for_vblank(dev, intel_crtc->pipe);
- flags = intel_infoframe_index(frame);
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(frame);
+
+ val &= ~g4x_infoframe_enable(frame);
+ val |= VIDEO_DIP_ENABLE;
+
+ I915_WRITE(reg, val);
+
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+ data++;
+ }
+
+ val |= g4x_infoframe_enable(frame);
+ val &= ~VIDEO_DIP_FREQ_MASK;
+ val |= VIDEO_DIP_FREQ_VSYNC;
+
+ I915_WRITE(reg, val);
+}
+
+static void cpt_write_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+{
+ uint32_t *data = (uint32_t *)frame;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+ unsigned i, len = DIP_HEADER_SIZE + frame->len;
+ u32 val = I915_READ(reg);
+
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(frame);
+
+ /* The DIP control register spec says that we need to update the AVI
+ * infoframe without clearing its enable bit */
+ if (frame->type == DIP_TYPE_AVI)
+ val |= VIDEO_DIP_ENABLE_AVI;
+ else
+ val &= ~g4x_infoframe_enable(frame);
+
+ val |= VIDEO_DIP_ENABLE;
- I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
+ I915_WRITE(reg, val);
for (i = 0; i < len; i += 4) {
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
- flags |= intel_infoframe_flags(frame);
+ val |= g4x_infoframe_enable(frame);
+ val &= ~VIDEO_DIP_FREQ_MASK;
+ val |= VIDEO_DIP_FREQ_VSYNC;
- I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
+ I915_WRITE(reg, val);
}
+
+static void vlv_write_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+{
+ uint32_t *data = (uint32_t *)frame;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
+ unsigned i, len = DIP_HEADER_SIZE + frame->len;
+ u32 val = I915_READ(reg);
+
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(frame);
+
+ val &= ~g4x_infoframe_enable(frame);
+ val |= VIDEO_DIP_ENABLE;
+
+ I915_WRITE(reg, val);
+
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+ data++;
+ }
+
+ val |= g4x_infoframe_enable(frame);
+ val &= ~VIDEO_DIP_FREQ_MASK;
+ val |= VIDEO_DIP_FREQ_VSYNC;
+
+ I915_WRITE(reg, val);
+}
+
+static void hsw_write_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+{
+ uint32_t *data = (uint32_t *)frame;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->pipe);
+ unsigned int i, len = DIP_HEADER_SIZE + frame->len;
+ u32 val = I915_READ(ctl_reg);
+
+ if (data_reg == 0)
+ return;
+
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+ val &= ~hsw_infoframe_enable(frame);
+ I915_WRITE(ctl_reg, val);
+
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(data_reg + i, *data);
+ data++;
+ }
+
+ val |= hsw_infoframe_enable(frame);
+ I915_WRITE(ctl_reg, val);
+}
+
static void intel_set_infoframe(struct drm_encoder *encoder,
struct dip_infoframe *frame)
{
@@ -189,7 +315,8 @@ static void intel_set_infoframe(struct drm_encoder *encoder,
intel_hdmi->write_infoframe(encoder, frame);
}
-static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
+void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
{
struct dip_infoframe avi_if = {
.type = DIP_TYPE_AVI,
@@ -197,10 +324,13 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
.len = DIP_LEN_AVI,
};
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
+ avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
+
intel_set_infoframe(encoder, &avi_if);
}
-static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
+void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
{
struct dip_infoframe spd_if;
@@ -221,8 +351,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = encoder->crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 sdvox;
@@ -259,7 +388,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
POSTING_READ(intel_hdmi->sdvox_reg);
- intel_hdmi_set_avi_infoframe(encoder);
+ intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
}
@@ -334,7 +463,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
intel_hdmi->has_hdmi_sink = false;
intel_hdmi->has_audio = false;
edid = drm_get_edid(connector,
- &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
+ intel_gmbus_get_adapter(dev_priv,
+ intel_hdmi->ddc_bus));
if (edid) {
if (edid->input & DRM_EDID_INPUT_DIGITAL) {
@@ -367,7 +497,8 @@ static int intel_hdmi_get_modes(struct drm_connector *connector)
*/
return intel_ddc_get_modes(connector,
- &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
+ intel_gmbus_get_adapter(dev_priv,
+ intel_hdmi->ddc_bus));
}
static bool
@@ -379,7 +510,8 @@ intel_hdmi_detect_audio(struct drm_connector *connector)
bool has_audio = false;
edid = drm_get_edid(connector,
- &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
+ intel_gmbus_get_adapter(dev_priv,
+ intel_hdmi->ddc_bus));
if (edid) {
if (edid->input & DRM_EDID_INPUT_DIGITAL)
has_audio = drm_detect_monitor_audio(edid);
@@ -393,8 +525,8 @@ intel_hdmi_detect_audio(struct drm_connector *connector)
static int
intel_hdmi_set_property(struct drm_connector *connector,
- struct drm_property *property,
- uint64_t val)
+ struct drm_property *property,
+ uint64_t val)
{
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
@@ -453,6 +585,14 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
kfree(connector);
}
+static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs_hsw = {
+ .dpms = intel_ddi_dpms,
+ .mode_fixup = intel_hdmi_mode_fixup,
+ .prepare = intel_encoder_prepare,
+ .mode_set = intel_ddi_mode_set,
+ .commit = intel_encoder_commit,
+};
+
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
.dpms = intel_hdmi_dpms,
.mode_fixup = intel_hdmi_mode_fixup,
@@ -542,20 +682,60 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
+ } else if (sdvox_reg == DDI_BUF_CTL(PORT_B)) {
+ DRM_DEBUG_DRIVER("LPT: detected output on DDI B\n");
+ intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
+ intel_hdmi->ddi_port = PORT_B;
+ dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
+ } else if (sdvox_reg == DDI_BUF_CTL(PORT_C)) {
+ DRM_DEBUG_DRIVER("LPT: detected output on DDI C\n");
+ intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
+ intel_hdmi->ddi_port = PORT_C;
+ dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
+ } else if (sdvox_reg == DDI_BUF_CTL(PORT_D)) {
+ DRM_DEBUG_DRIVER("LPT: detected output on DDI D\n");
+ intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
+ intel_hdmi->ddi_port = PORT_D;
+ dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
+ } else {
+ /* If we got an unknown sdvox_reg, things are pretty much broken
+ * in a way that we should let the kernel know about */
+ BUG();
}
intel_hdmi->sdvox_reg = sdvox_reg;
if (!HAS_PCH_SPLIT(dev)) {
- intel_hdmi->write_infoframe = i9xx_write_infoframe;
+ intel_hdmi->write_infoframe = g4x_write_infoframe;
I915_WRITE(VIDEO_DIP_CTL, 0);
+ } else if (IS_VALLEYVIEW(dev)) {
+ intel_hdmi->write_infoframe = vlv_write_infoframe;
+ for_each_pipe(i)
+ I915_WRITE(VLV_TVIDEO_DIP_CTL(i), 0);
+ } else if (IS_HASWELL(dev)) {
+ /* FIXME: Haswell has a new set of DIP frame registers, but we are
+ * just doing the minimum required for HDMI to work at this stage.
+ */
+ intel_hdmi->write_infoframe = hsw_write_infoframe;
+ for_each_pipe(i)
+ I915_WRITE(HSW_TVIDEO_DIP_CTL(i), 0);
+ } else if (HAS_PCH_IBX(dev)) {
+ intel_hdmi->write_infoframe = ibx_write_infoframe;
+ for_each_pipe(i)
+ I915_WRITE(TVIDEO_DIP_CTL(i), 0);
} else {
- intel_hdmi->write_infoframe = ironlake_write_infoframe;
+ intel_hdmi->write_infoframe = cpt_write_infoframe;
for_each_pipe(i)
I915_WRITE(TVIDEO_DIP_CTL(i), 0);
}
- drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
+ if (IS_HASWELL(dev))
+ drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs_hsw);
+ else
+ drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
intel_hdmi_add_properties(intel_hdmi, connector);
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 8fdc9570021..4a9707dd0f9 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -35,6 +35,20 @@
#include "i915_drm.h"
#include "i915_drv.h"
+struct gmbus_port {
+ const char *name;
+ int reg;
+};
+
+static const struct gmbus_port gmbus_ports[] = {
+ { "ssc", GPIOB },
+ { "vga", GPIOA },
+ { "panel", GPIOC },
+ { "dpc", GPIOD },
+ { "dpb", GPIOE },
+ { "dpd", GPIOF },
+};
+
/* Intel GPIO access functions */
#define I2C_RISEFALL_TIME 10
@@ -49,10 +63,7 @@ void
intel_i2c_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (HAS_PCH_SPLIT(dev))
- I915_WRITE(PCH_GMBUS0, 0);
- else
- I915_WRITE(GMBUS0, 0);
+ I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
}
static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
@@ -140,63 +151,173 @@ static void set_data(void *data, int state_high)
POSTING_READ(bus->gpio_reg);
}
-static bool
+static int
+intel_gpio_pre_xfer(struct i2c_adapter *adapter)
+{
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ adapter);
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+
+ intel_i2c_reset(dev_priv->dev);
+ intel_i2c_quirk_set(dev_priv, true);
+ set_data(bus, 1);
+ set_clock(bus, 1);
+ udelay(I2C_RISEFALL_TIME);
+ return 0;
+}
+
+static void
+intel_gpio_post_xfer(struct i2c_adapter *adapter)
+{
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ adapter);
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+
+ set_data(bus, 1);
+ set_clock(bus, 1);
+ intel_i2c_quirk_set(dev_priv, false);
+}
+
+static void
intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
{
struct drm_i915_private *dev_priv = bus->dev_priv;
- static const int map_pin_to_reg[] = {
- 0,
- GPIOB,
- GPIOA,
- GPIOC,
- GPIOD,
- GPIOE,
- 0,
- GPIOF,
- };
struct i2c_algo_bit_data *algo;
- if (pin >= ARRAY_SIZE(map_pin_to_reg) || !map_pin_to_reg[pin])
- return false;
-
algo = &bus->bit_algo;
- bus->gpio_reg = map_pin_to_reg[pin];
- if (HAS_PCH_SPLIT(dev_priv->dev))
- bus->gpio_reg += PCH_GPIOA - GPIOA;
+ /* -1 to map pin pair to gmbus index */
+ bus->gpio_reg = dev_priv->gpio_mmio_base + gmbus_ports[pin - 1].reg;
bus->adapter.algo_data = algo;
algo->setsda = set_data;
algo->setscl = set_clock;
algo->getsda = get_data;
algo->getscl = get_clock;
+ algo->pre_xfer = intel_gpio_pre_xfer;
+ algo->post_xfer = intel_gpio_post_xfer;
algo->udelay = I2C_RISEFALL_TIME;
algo->timeout = usecs_to_jiffies(2200);
algo->data = bus;
+}
+
+static int
+gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
+ u32 gmbus1_index)
+{
+ int reg_offset = dev_priv->gpio_mmio_base;
+ u16 len = msg->len;
+ u8 *buf = msg->buf;
+
+ I915_WRITE(GMBUS1 + reg_offset,
+ gmbus1_index |
+ GMBUS_CYCLE_WAIT |
+ (len << GMBUS_BYTE_COUNT_SHIFT) |
+ (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+ while (len) {
+ int ret;
+ u32 val, loop = 0;
+ u32 gmbus2;
+
+ ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
+ (GMBUS_SATOER | GMBUS_HW_RDY),
+ 50);
+ if (ret)
+ return -ETIMEDOUT;
+ if (gmbus2 & GMBUS_SATOER)
+ return -ENXIO;
+
+ val = I915_READ(GMBUS3 + reg_offset);
+ do {
+ *buf++ = val & 0xff;
+ val >>= 8;
+ } while (--len && ++loop < 4);
+ }
- return true;
+ return 0;
}
static int
-intel_i2c_quirk_xfer(struct intel_gmbus *bus,
- struct i2c_msg *msgs,
- int num)
+gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
{
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ int reg_offset = dev_priv->gpio_mmio_base;
+ u16 len = msg->len;
+ u8 *buf = msg->buf;
+ u32 val, loop;
+
+ val = loop = 0;
+ while (len && loop < 4) {
+ val |= *buf++ << (8 * loop++);
+ len -= 1;
+ }
+
+ I915_WRITE(GMBUS3 + reg_offset, val);
+ I915_WRITE(GMBUS1 + reg_offset,
+ GMBUS_CYCLE_WAIT |
+ (msg->len << GMBUS_BYTE_COUNT_SHIFT) |
+ (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+ while (len) {
+ int ret;
+ u32 gmbus2;
+
+ val = loop = 0;
+ do {
+ val |= *buf++ << (8 * loop);
+ } while (--len && ++loop < 4);
+
+ I915_WRITE(GMBUS3 + reg_offset, val);
+
+ ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
+ (GMBUS_SATOER | GMBUS_HW_RDY),
+ 50);
+ if (ret)
+ return -ETIMEDOUT;
+ if (gmbus2 & GMBUS_SATOER)
+ return -ENXIO;
+ }
+ return 0;
+}
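
A minimal standalone sketch (not part of the patch) of the byte packing performed by the loops above: up to four message bytes go into the 32-bit GMBUS3 data register, least-significant byte first.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const uint8_t buf[] = { 0xAA, 0xBB, 0xCC, 0xDD };
		uint32_t val = 0;
		unsigned int loop;

		for (loop = 0; loop < 4; loop++)
			val |= (uint32_t)buf[loop] << (8 * loop);

		/* prints 0xDDCCBBAA: the first byte lands in the low byte */
		printf("GMBUS3 word: 0x%08X\n", (unsigned int)val);
		return 0;
	}
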
+
+/*
+ * The gmbus controller can combine a 1 or 2 byte write with a read that
+ * immediately follows it by using an "INDEX" cycle.
+ */
+static bool
+gmbus_is_index_read(struct i2c_msg *msgs, int i, int num)
+{
+ return (i + 1 < num &&
+ !(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 &&
+ (msgs[i + 1].flags & I2C_M_RD));
+}
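
As an illustration (slave address, offset and lengths are hypothetical, in the style of an EDID fetch), a message pair that gmbus_is_index_read() matches, so gmbus_xfer() folds both messages into a single INDEX cycle:

	#include <linux/i2c.h>

	static u8 edid_buf[128];

	static struct i2c_msg edid_msgs[] = {
		{ .addr = 0x50, .flags = 0,        .len = 1,   .buf = (u8 []){ 0 } },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = edid_buf },
	};
	/* gmbus_is_index_read(edid_msgs, 0, 2) is true: a 1-byte write
	 * immediately followed by a read from the same slave. */
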
+
+static int
+gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
+{
+ int reg_offset = dev_priv->gpio_mmio_base;
+ u32 gmbus1_index = 0;
+ u32 gmbus5 = 0;
int ret;
- intel_i2c_reset(dev_priv->dev);
+ if (msgs[0].len == 2)
+ gmbus5 = GMBUS_2BYTE_INDEX_EN |
+ msgs[0].buf[1] | (msgs[0].buf[0] << 8);
+ if (msgs[0].len == 1)
+ gmbus1_index = GMBUS_CYCLE_INDEX |
+ (msgs[0].buf[0] << GMBUS_SLAVE_INDEX_SHIFT);
- intel_i2c_quirk_set(dev_priv, true);
- set_data(bus, 1);
- set_clock(bus, 1);
- udelay(I2C_RISEFALL_TIME);
+ /* GMBUS5 holds 16-bit index */
+ if (gmbus5)
+ I915_WRITE(GMBUS5 + reg_offset, gmbus5);
- ret = i2c_bit_algo.master_xfer(&bus->adapter, msgs, num);
+ ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index);
- set_data(bus, 1);
- set_clock(bus, 1);
- intel_i2c_quirk_set(dev_priv, false);
+ /* Clear GMBUS5 after each index transfer */
+ if (gmbus5)
+ I915_WRITE(GMBUS5 + reg_offset, 0);
return ret;
}
@@ -210,117 +331,108 @@ gmbus_xfer(struct i2c_adapter *adapter,
struct intel_gmbus,
adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
- int i, reg_offset, ret;
+ int i, reg_offset;
+ int ret = 0;
mutex_lock(&dev_priv->gmbus_mutex);
if (bus->force_bit) {
- ret = intel_i2c_quirk_xfer(bus, msgs, num);
+ ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
goto out;
}
- reg_offset = HAS_PCH_SPLIT(dev_priv->dev) ? PCH_GMBUS0 - GMBUS0 : 0;
+ reg_offset = dev_priv->gpio_mmio_base;
I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
for (i = 0; i < num; i++) {
- u16 len = msgs[i].len;
- u8 *buf = msgs[i].buf;
-
- if (msgs[i].flags & I2C_M_RD) {
- I915_WRITE(GMBUS1 + reg_offset,
- GMBUS_CYCLE_WAIT |
- (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
- (len << GMBUS_BYTE_COUNT_SHIFT) |
- (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
- GMBUS_SLAVE_READ | GMBUS_SW_RDY);
- POSTING_READ(GMBUS2+reg_offset);
- do {
- u32 val, loop = 0;
-
- if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
- goto timeout;
- if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
- goto clear_err;
-
- val = I915_READ(GMBUS3 + reg_offset);
- do {
- *buf++ = val & 0xff;
- val >>= 8;
- } while (--len && ++loop < 4);
- } while (len);
+ u32 gmbus2;
+
+ if (gmbus_is_index_read(msgs, i, num)) {
+ ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
+ i += 1; /* set i to the index of the read xfer */
+ } else if (msgs[i].flags & I2C_M_RD) {
+ ret = gmbus_xfer_read(dev_priv, &msgs[i], 0);
} else {
- u32 val, loop;
-
- val = loop = 0;
- do {
- val |= *buf++ << (8 * loop);
- } while (--len && ++loop < 4);
-
- I915_WRITE(GMBUS3 + reg_offset, val);
- I915_WRITE(GMBUS1 + reg_offset,
- GMBUS_CYCLE_WAIT |
- (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
- (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
- (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
- GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
- POSTING_READ(GMBUS2+reg_offset);
-
- while (len) {
- if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
- goto timeout;
- if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
- goto clear_err;
-
- val = loop = 0;
- do {
- val |= *buf++ << (8 * loop);
- } while (--len && ++loop < 4);
-
- I915_WRITE(GMBUS3 + reg_offset, val);
- POSTING_READ(GMBUS2+reg_offset);
- }
+ ret = gmbus_xfer_write(dev_priv, &msgs[i]);
}
- if (i + 1 < num && wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
+ if (ret == -ETIMEDOUT)
+ goto timeout;
+ if (ret == -ENXIO)
+ goto clear_err;
+
+ ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
+ (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE),
+ 50);
+ if (ret)
goto timeout;
- if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+ if (gmbus2 & GMBUS_SATOER)
goto clear_err;
}
- goto done;
+ /* Generate a STOP condition on the bus. Note that gmbus can't generate
+ * a STOP on the very first cycle. To simplify the code we
+ * unconditionally generate the STOP condition with an additional gmbus
+ * cycle. */
+ I915_WRITE(GMBUS1 + reg_offset, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
+
+ /* Mark the GMBUS interface as disabled after waiting for idle.
+ * We will re-enable it at the start of the next xfer;
+ * until then, let it sleep.
+ */
+ if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
+ 10)) {
+ DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n",
+ adapter->name);
+ ret = -ETIMEDOUT;
+ }
+ I915_WRITE(GMBUS0 + reg_offset, 0);
+ ret = ret ?: i;
+ goto out;
clear_err:
+ /*
+ * Wait for bus to IDLE before clearing NAK.
+ * If we clear the NAK while bus is still active, then it will stay
+ * active and the next transaction may fail.
+ */
+ if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
+ 10))
+ DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n",
+ adapter->name);
+
/* Toggle the Software Clear Interrupt bit. This has the effect
* of resetting the GMBUS controller and so clearing the
* BUS_ERROR raised by the slave's NAK.
*/
I915_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
I915_WRITE(GMBUS1 + reg_offset, 0);
+ I915_WRITE(GMBUS0 + reg_offset, 0);
-done:
- /* Mark the GMBUS interface as disabled after waiting for idle.
- * We will re-enable it at the start of the next xfer,
- * till then let it sleep.
+ DRM_DEBUG_KMS("GMBUS [%s] NAK for addr: %04x %c(%d)\n",
+ adapter->name, msgs[i].addr,
+ (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
+
+ /*
+ * If no ACK is received during the address phase of a transaction,
+ * the adapter must report -ENXIO.
+ * It is not clear what to return if no ACK is received at other times.
+ * So, we always return -ENXIO in all NAK cases, to ensure we send
+ * it at least during the one case that is specified.
*/
- if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0, 10))
- DRM_INFO("GMBUS timed out waiting for idle\n");
- I915_WRITE(GMBUS0 + reg_offset, 0);
- ret = i;
+ ret = -ENXIO;
goto out;
timeout:
- DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
- bus->reg0 & 0xff, bus->adapter.name);
+ DRM_INFO("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
+ bus->adapter.name, bus->reg0 & 0xff);
I915_WRITE(GMBUS0 + reg_offset, 0);
/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
- if (!bus->has_gpio) {
- ret = -EIO;
- } else {
- bus->force_bit = true;
- ret = intel_i2c_quirk_xfer(bus, msgs, num);
- }
+ bus->force_bit = true;
+ ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
+
out:
mutex_unlock(&dev_priv->gmbus_mutex);
return ret;
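
A sketch of what the error convention above buys a caller: i2c_transfer() (which lands in gmbus_xfer()) returns the number of transferred messages on success, -ENXIO on an address NAK and -ETIMEDOUT when the controller never became ready. The helper below is hypothetical, not part of the patch:

	#include <linux/i2c.h>

	static bool slave_responds(struct i2c_adapter *adapter, u16 addr)
	{
		u8 offset = 0;
		struct i2c_msg msg = {
			.addr = addr, .flags = 0, .len = 1, .buf = &offset,
		};

		/* -ENXIO here means the address phase was NAKed */
		return i2c_transfer(adapter, &msg, 1) == 1;
	}
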
@@ -346,35 +458,26 @@ static const struct i2c_algorithm gmbus_algorithm = {
*/
int intel_setup_gmbus(struct drm_device *dev)
{
- static const char *names[GMBUS_NUM_PORTS] = {
- "disabled",
- "ssc",
- "vga",
- "panel",
- "dpc",
- "dpb",
- "reserved",
- "dpd",
- };
struct drm_i915_private *dev_priv = dev->dev_private;
int ret, i;
- dev_priv->gmbus = kcalloc(GMBUS_NUM_PORTS, sizeof(struct intel_gmbus),
- GFP_KERNEL);
- if (dev_priv->gmbus == NULL)
- return -ENOMEM;
+ if (HAS_PCH_SPLIT(dev))
+ dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA;
+ else
+ dev_priv->gpio_mmio_base = 0;
mutex_init(&dev_priv->gmbus_mutex);
for (i = 0; i < GMBUS_NUM_PORTS; i++) {
struct intel_gmbus *bus = &dev_priv->gmbus[i];
+ u32 port = i + 1; /* +1 to map gmbus index to pin pair */
bus->adapter.owner = THIS_MODULE;
bus->adapter.class = I2C_CLASS_DDC;
snprintf(bus->adapter.name,
sizeof(bus->adapter.name),
"i915 gmbus %s",
- names[i]);
+ gmbus_ports[i].name);
bus->adapter.dev.parent = &dev->pdev->dev;
bus->dev_priv = dev_priv;
@@ -385,13 +488,13 @@ int intel_setup_gmbus(struct drm_device *dev)
goto err;
/* By default use a conservative clock rate */
- bus->reg0 = i | GMBUS_RATE_100KHZ;
+ bus->reg0 = port | GMBUS_RATE_100KHZ;
- bus->has_gpio = intel_gpio_setup(bus, i);
-
- /* XXX force bit banging until GMBUS is fully debugged */
- if (bus->has_gpio)
+ /* gmbus seems to be broken on i830 */
+ if (IS_I830(dev))
bus->force_bit = true;
+
+ intel_gpio_setup(bus, port);
}
intel_i2c_reset(dev_priv->dev);
@@ -403,11 +506,18 @@ err:
struct intel_gmbus *bus = &dev_priv->gmbus[i];
i2c_del_adapter(&bus->adapter);
}
- kfree(dev_priv->gmbus);
- dev_priv->gmbus = NULL;
return ret;
}
+struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *dev_priv,
+ unsigned port)
+{
+ WARN_ON(!intel_gmbus_is_port_valid(port));
+ /* -1 to map pin pair to gmbus index */
+ return (intel_gmbus_is_port_valid(port)) ?
+ &dev_priv->gmbus[port - 1].adapter : NULL;
+}
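
Typical use of the new lookup helper, mirroring the drm_get_edid() call sites converted elsewhere in this series (the wrapper itself is hypothetical):

	static struct edid *probe_edid(struct drm_connector *connector,
				       struct drm_i915_private *dev_priv,
				       unsigned port)
	{
		struct i2c_adapter *adapter;

		adapter = intel_gmbus_get_adapter(dev_priv, port);
		if (!adapter)	/* e.g. an invalid pin from a buggy VBT */
			return NULL;

		return drm_get_edid(connector, adapter);
	}
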
+
void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
@@ -419,8 +529,7 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- if (bus->has_gpio)
- bus->force_bit = force_bit;
+ bus->force_bit = force_bit;
}
void intel_teardown_gmbus(struct drm_device *dev)
@@ -435,7 +544,4 @@ void intel_teardown_gmbus(struct drm_device *dev)
struct intel_gmbus *bus = &dev_priv->gmbus[i];
i2c_del_adapter(&bus->adapter);
}
-
- kfree(dev_priv->gmbus);
- dev_priv->gmbus = NULL;
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 9c71183629c..9dee82350de 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -480,7 +480,7 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
{
- DRM_DEBUG_KMS("Skipping forced modeset for %s\n", id->ident);
+ DRM_INFO("Skipping forced modeset for %s\n", id->ident);
return 1;
}
@@ -628,7 +628,7 @@ static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
{
- DRM_DEBUG_KMS("Skipping LVDS initialization for %s\n", id->ident);
+ DRM_INFO("Skipping LVDS initialization for %s\n", id->ident);
return 1;
}
@@ -851,8 +851,8 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
child->device_type != DEVICE_TYPE_LFP)
continue;
- if (child->i2c_pin)
- *i2c_pin = child->i2c_pin;
+ if (intel_gmbus_is_port_valid(child->i2c_pin))
+ *i2c_pin = child->i2c_pin;
/* However, we cannot trust the BIOS writers to populate
* the VBT correctly. Since LVDS requires additional
@@ -993,7 +993,8 @@ bool intel_lvds_init(struct drm_device *dev)
* preferred mode is the right one.
*/
intel_lvds->edid = drm_get_edid(connector,
- &dev_priv->gmbus[pin].adapter);
+ intel_gmbus_get_adapter(dev_priv,
+ pin));
if (intel_lvds->edid) {
if (drm_add_edid_modes(connector,
intel_lvds->edid)) {
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index d1928e79d9b..d67ec3a51e4 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -56,7 +56,8 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus)
}
};
- return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 2) == 2;
+ return i2c_transfer(intel_gmbus_get_adapter(dev_priv, ddc_bus),
+ msgs, 2) == 2;
}
/**
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 289140bc83c..18bd0af855d 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -25,6 +25,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/acpi.h>
#include <linux/acpi_io.h>
#include <acpi/video.h>
@@ -149,7 +151,7 @@ struct opregion_asle {
static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct opregion_asle *asle = dev_priv->opregion.asle;
+ struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
u32 max;
if (!(bclp & ASLE_BCLP_VALID))
@@ -161,7 +163,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
max = intel_panel_get_max_backlight(dev);
intel_panel_set_backlight(dev, bclp * max / 255);
- asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
+ iowrite32((bclp*0x64)/0xff | ASLE_CBLV_VALID, &asle->cblv);
return 0;
}
@@ -198,14 +200,14 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
void intel_opregion_asle_intr(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct opregion_asle *asle = dev_priv->opregion.asle;
+ struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
u32 asle_stat = 0;
u32 asle_req;
if (!asle)
return;
- asle_req = asle->aslc & ASLE_REQ_MSK;
+ asle_req = ioread32(&asle->aslc) & ASLE_REQ_MSK;
if (!asle_req) {
DRM_DEBUG_DRIVER("non asle set request??\n");
@@ -213,31 +215,31 @@ void intel_opregion_asle_intr(struct drm_device *dev)
}
if (asle_req & ASLE_SET_ALS_ILLUM)
- asle_stat |= asle_set_als_illum(dev, asle->alsi);
+ asle_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi));
if (asle_req & ASLE_SET_BACKLIGHT)
- asle_stat |= asle_set_backlight(dev, asle->bclp);
+ asle_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
if (asle_req & ASLE_SET_PFIT)
- asle_stat |= asle_set_pfit(dev, asle->pfit);
+ asle_stat |= asle_set_pfit(dev, ioread32(&asle->pfit));
if (asle_req & ASLE_SET_PWM_FREQ)
- asle_stat |= asle_set_pwm_freq(dev, asle->pfmb);
+ asle_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb));
- asle->aslc = asle_stat;
+ iowrite32(asle_stat, &asle->aslc);
}
void intel_opregion_gse_intr(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct opregion_asle *asle = dev_priv->opregion.asle;
+ struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
u32 asle_stat = 0;
u32 asle_req;
if (!asle)
return;
- asle_req = asle->aslc & ASLE_REQ_MSK;
+ asle_req = ioread32(&asle->aslc) & ASLE_REQ_MSK;
if (!asle_req) {
DRM_DEBUG_DRIVER("non asle set request??\n");
@@ -250,7 +252,7 @@ void intel_opregion_gse_intr(struct drm_device *dev)
}
if (asle_req & ASLE_SET_BACKLIGHT)
- asle_stat |= asle_set_backlight(dev, asle->bclp);
+ asle_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
if (asle_req & ASLE_SET_PFIT) {
DRM_DEBUG_DRIVER("Pfit is not supported\n");
@@ -262,7 +264,7 @@ void intel_opregion_gse_intr(struct drm_device *dev)
asle_stat |= ASLE_PWM_FREQ_FAILED;
}
- asle->aslc = asle_stat;
+ iowrite32(asle_stat, &asle->aslc);
}
#define ASLE_ALS_EN (1<<0)
#define ASLE_BLC_EN (1<<1)
@@ -272,15 +274,16 @@ void intel_opregion_gse_intr(struct drm_device *dev)
void intel_opregion_enable_asle(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct opregion_asle *asle = dev_priv->opregion.asle;
+ struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
if (asle) {
if (IS_MOBILE(dev))
intel_enable_asle(dev);
- asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
- ASLE_PFMB_EN;
- asle->ardy = 1;
+ iowrite32(ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
+ ASLE_PFMB_EN,
+ &asle->tche);
+ iowrite32(1, &asle->ardy);
}
}
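
The conversions in this file all follow the same __iomem discipline: mapped opregion fields are accessed through ioread32()/iowrite32() (or memcpy_fromio()) rather than dereferenced directly. A minimal sketch of the idiom, with a hypothetical mapping:

	#include <linux/io.h>

	static u32 toggle_ready_bit(resource_size_t phys)
	{
		u32 __iomem *reg = ioremap(phys, sizeof(u32));
		u32 val;

		if (!reg)
			return 0;

		val = ioread32(reg);		/* never: val = *reg */
		iowrite32(val ^ 1, reg);	/* never: *reg = val ^ 1 */
		iounmap(reg);
		return val;
	}
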
@@ -298,7 +301,7 @@ static int intel_opregion_video_event(struct notifier_block *nb,
Linux, these are handled by the dock, button and video drivers.
*/
- struct opregion_acpi *acpi;
+ struct opregion_acpi __iomem *acpi;
struct acpi_bus_event *event = data;
int ret = NOTIFY_OK;
@@ -310,10 +313,11 @@ static int intel_opregion_video_event(struct notifier_block *nb,
acpi = system_opregion->acpi;
- if (event->type == 0x80 && !(acpi->cevt & 0x1))
+ if (event->type == 0x80 &&
+ (ioread32(&acpi->cevt) & 1) == 0)
ret = NOTIFY_BAD;
- acpi->csts = 0;
+ iowrite32(0, &acpi->csts);
return ret;
}
@@ -337,6 +341,7 @@ static void intel_didl_outputs(struct drm_device *dev)
struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
unsigned long long device_id;
acpi_status status;
+ u32 temp;
int i = 0;
handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
@@ -355,7 +360,7 @@ static void intel_didl_outputs(struct drm_device *dev)
}
if (!acpi_video_bus) {
- printk(KERN_WARNING "No ACPI video bus found\n");
+ pr_warn("No ACPI video bus found\n");
return;
}
@@ -371,7 +376,8 @@ static void intel_didl_outputs(struct drm_device *dev)
if (ACPI_SUCCESS(status)) {
if (!device_id)
goto blind_set;
- opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f);
+ iowrite32((u32)(device_id & 0x0f0f),
+ &opregion->acpi->didl[i]);
i++;
}
}
@@ -379,7 +385,7 @@ static void intel_didl_outputs(struct drm_device *dev)
end:
/* If fewer than 8 outputs, the list must be null terminated */
if (i < 8)
- opregion->acpi->didl[i] = 0;
+ iowrite32(0, &opregion->acpi->didl[i]);
return;
blind_set:
@@ -413,7 +419,9 @@ blind_set:
output_type = ACPI_LVDS_OUTPUT;
break;
}
- opregion->acpi->didl[i] |= (1<<31) | output_type | i;
+ temp = ioread32(&opregion->acpi->didl[i]);
+ iowrite32(temp | (1<<31) | output_type | i,
+ &opregion->acpi->didl[i]);
i++;
}
goto end;
@@ -434,8 +442,8 @@ void intel_opregion_init(struct drm_device *dev)
/* Notify BIOS we are ready to handle ACPI video ext notifs.
* Right now, all the events are handled by the ACPI video module.
* We don't actually need to do anything with them. */
- opregion->acpi->csts = 0;
- opregion->acpi->drdy = 1;
+ iowrite32(0, &opregion->acpi->csts);
+ iowrite32(1, &opregion->acpi->drdy);
system_opregion = opregion;
register_acpi_notifier(&intel_opregion_notifier);
@@ -454,7 +462,7 @@ void intel_opregion_fini(struct drm_device *dev)
return;
if (opregion->acpi) {
- opregion->acpi->drdy = 0;
+ iowrite32(0, &opregion->acpi->drdy);
system_opregion = NULL;
unregister_acpi_notifier(&intel_opregion_notifier);
@@ -474,8 +482,9 @@ int intel_opregion_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
- void *base;
+ void __iomem *base;
u32 asls, mboxes;
+ char buf[sizeof(OPREGION_SIGNATURE)];
int err = 0;
pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
@@ -489,7 +498,9 @@ int intel_opregion_setup(struct drm_device *dev)
if (!base)
return -ENOMEM;
- if (memcmp(base, OPREGION_SIGNATURE, 16)) {
+ memcpy_fromio(buf, base, sizeof(buf));
+
+ if (memcmp(buf, OPREGION_SIGNATURE, 16)) {
DRM_DEBUG_DRIVER("opregion signature mismatch\n");
err = -EINVAL;
goto err_out;
@@ -499,7 +510,7 @@ int intel_opregion_setup(struct drm_device *dev)
opregion->lid_state = base + ACPI_CLID;
- mboxes = opregion->header->mboxes;
+ mboxes = ioread32(&opregion->header->mboxes);
if (mboxes & MBOX_ACPI) {
DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
opregion->acpi = base + OPREGION_ACPI_OFFSET;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 80b331c322f..458743da377 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -187,14 +187,14 @@ struct intel_overlay {
void (*flip_tail)(struct intel_overlay *);
};
-static struct overlay_registers *
+static struct overlay_registers __iomem *
intel_overlay_map_regs(struct intel_overlay *overlay)
{
drm_i915_private_t *dev_priv = overlay->dev->dev_private;
- struct overlay_registers *regs;
+ struct overlay_registers __iomem *regs;
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
- regs = overlay->reg_bo->phys_obj->handle->vaddr;
+ regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
else
regs = io_mapping_map_wc(dev_priv->mm.gtt_mapping,
overlay->reg_bo->gtt_offset);
@@ -203,7 +203,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
}
static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
- struct overlay_registers *regs)
+ struct overlay_registers __iomem *regs)
{
if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
io_mapping_unmap(regs);
@@ -215,20 +215,21 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
BUG_ON(overlay->last_flip_req);
- ret = i915_add_request(LP_RING(dev_priv), NULL, request);
+ ret = i915_add_request(ring, NULL, request);
if (ret) {
kfree(request);
return ret;
}
overlay->last_flip_req = request->seqno;
overlay->flip_tail = tail;
- ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req,
- true);
+ ret = i915_wait_request(ring, overlay->last_flip_req);
if (ret)
return ret;
+ i915_gem_retire_requests(dev);
overlay->last_flip_req = 0;
return 0;
@@ -262,7 +263,7 @@ i830_activate_pipe_a(struct drm_device *dev)
DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n");
mode = drm_mode_duplicate(dev, &vesa_640x480);
- drm_mode_set_crtcinfo(mode, 0);
+
if (!drm_crtc_helper_set_mode(&crtc->base, mode,
crtc->base.x, crtc->base.y,
crtc->base.fb))
@@ -287,6 +288,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
struct drm_i915_gem_request *request;
int pipe_a_quirk = 0;
int ret;
@@ -306,17 +308,17 @@ static int intel_overlay_on(struct intel_overlay *overlay)
goto out;
}
- ret = BEGIN_LP_RING(4);
+ ret = intel_ring_begin(ring, 4);
if (ret) {
kfree(request);
goto out;
}
- OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
- OUT_RING(overlay->flip_addr | OFC_UPDATE);
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
+ intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
+ intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
ret = intel_overlay_do_wait_request(overlay, request, NULL);
out:
@@ -332,6 +334,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
struct drm_i915_gem_request *request;
u32 flip_addr = overlay->flip_addr;
u32 tmp;
@@ -351,16 +354,16 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
if (tmp & (1 << 17))
DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
- ret = BEGIN_LP_RING(2);
+ ret = intel_ring_begin(ring, 2);
if (ret) {
kfree(request);
return ret;
}
- OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
- OUT_RING(flip_addr);
- ADVANCE_LP_RING();
+ intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+ intel_ring_emit(ring, flip_addr);
+ intel_ring_advance(ring);
- ret = i915_add_request(LP_RING(dev_priv), NULL, request);
+ ret = i915_add_request(ring, NULL, request);
if (ret) {
kfree(request);
return ret;
@@ -401,6 +404,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
u32 flip_addr = overlay->flip_addr;
struct drm_i915_gem_request *request;
int ret;
@@ -417,20 +421,20 @@ static int intel_overlay_off(struct intel_overlay *overlay)
* of the hw. Do it in both cases */
flip_addr |= OFC_UPDATE;
- ret = BEGIN_LP_RING(6);
+ ret = intel_ring_begin(ring, 6);
if (ret) {
kfree(request);
return ret;
}
/* wait for overlay to go idle */
- OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
- OUT_RING(flip_addr);
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+ intel_ring_emit(ring, flip_addr);
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
/* turn overlay off */
- OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
- OUT_RING(flip_addr);
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- ADVANCE_LP_RING();
+ intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+ intel_ring_emit(ring, flip_addr);
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ intel_ring_advance(ring);
return intel_overlay_do_wait_request(overlay, request,
intel_overlay_off_tail);
@@ -442,15 +446,16 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
if (overlay->last_flip_req == 0)
return 0;
- ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req,
- true);
+ ret = i915_wait_request(ring, overlay->last_flip_req);
if (ret)
return ret;
+ i915_gem_retire_requests(dev);
if (overlay->flip_tail)
overlay->flip_tail(overlay);
@@ -467,6 +472,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
/* Only wait if there is actually an old frame to release to
@@ -483,15 +489,15 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
if (request == NULL)
return -ENOMEM;
- ret = BEGIN_LP_RING(2);
+ ret = intel_ring_begin(ring, 2);
if (ret) {
kfree(request);
return ret;
}
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
ret = intel_overlay_do_wait_request(overlay, request,
intel_overlay_release_old_vid_tail);
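
All of the BEGIN_LP_RING() conversions above reduce to one contract: reserve N dwords, emit exactly N, then advance the tail. As a fragment (ring and ret assumed in scope), the minimal shape is:

	ret = intel_ring_begin(ring, 2);	/* reserve two dwords */
	if (ret)
		return ret;
	intel_ring_emit(ring, MI_NOOP);		/* must emit exactly two */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);		/* publish the new tail */
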
@@ -619,14 +625,15 @@ static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
0x3000, 0x0800, 0x3000
};
-static void update_polyphase_filter(struct overlay_registers *regs)
+static void update_polyphase_filter(struct overlay_registers __iomem *regs)
{
- memcpy(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs));
- memcpy(regs->UV_HCOEFS, uv_static_hcoeffs, sizeof(uv_static_hcoeffs));
+ memcpy_toio(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs));
+ memcpy_toio(regs->UV_HCOEFS, uv_static_hcoeffs,
+ sizeof(uv_static_hcoeffs));
}
static bool update_scaling_factors(struct intel_overlay *overlay,
- struct overlay_registers *regs,
+ struct overlay_registers __iomem *regs,
struct put_image_params *params)
{
/* fixed point with a 12 bit shift */
@@ -665,16 +672,19 @@ static bool update_scaling_factors(struct intel_overlay *overlay,
overlay->old_xscale = xscale;
overlay->old_yscale = yscale;
- regs->YRGBSCALE = (((yscale & FRACT_MASK) << 20) |
- ((xscale >> FP_SHIFT) << 16) |
- ((xscale & FRACT_MASK) << 3));
+ iowrite32(((yscale & FRACT_MASK) << 20) |
+ ((xscale >> FP_SHIFT) << 16) |
+ ((xscale & FRACT_MASK) << 3),
+ &regs->YRGBSCALE);
- regs->UVSCALE = (((yscale_UV & FRACT_MASK) << 20) |
- ((xscale_UV >> FP_SHIFT) << 16) |
- ((xscale_UV & FRACT_MASK) << 3));
+ iowrite32(((yscale_UV & FRACT_MASK) << 20) |
+ ((xscale_UV >> FP_SHIFT) << 16) |
+ ((xscale_UV & FRACT_MASK) << 3),
+ &regs->UVSCALE);
- regs->UVSCALEV = ((((yscale >> FP_SHIFT) << 16) |
- ((yscale_UV >> FP_SHIFT) << 0)));
+ iowrite32((((yscale >> FP_SHIFT) << 16) |
+ ((yscale_UV >> FP_SHIFT) << 0)),
+ &regs->UVSCALEV);
if (scale_changed)
update_polyphase_filter(regs);
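
A standalone illustration of the x.12 fixed-point format noted above ("fixed point with a 12 bit shift"); the FRACT_MASK value used here is an assumption for the sketch:

	#include <stdint.h>
	#include <stdio.h>

	#define FP_SHIFT   12
	#define FRACT_MASK 0xfff

	int main(void)
	{
		uint32_t src_w = 1920, dst_w = 1280;
		/* 1920/1280 = 1.5 -> 0x1800 in x.12 fixed point */
		uint32_t xscale = (uint32_t)(((uint64_t)src_w << FP_SHIFT) / dst_w);

		printf("int part %u, fraction 0x%03x\n",
		       (unsigned int)(xscale >> FP_SHIFT),
		       (unsigned int)(xscale & FRACT_MASK));
		return 0;
	}
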
@@ -683,30 +693,32 @@ static bool update_scaling_factors(struct intel_overlay *overlay,
}
static void update_colorkey(struct intel_overlay *overlay,
- struct overlay_registers *regs)
+ struct overlay_registers __iomem *regs)
{
u32 key = overlay->color_key;
switch (overlay->crtc->base.fb->bits_per_pixel) {
case 8:
- regs->DCLRKV = 0;
- regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
+ iowrite32(0, &regs->DCLRKV);
+ iowrite32(CLK_RGB8I_MASK | DST_KEY_ENABLE, &regs->DCLRKM);
break;
case 16:
if (overlay->crtc->base.fb->depth == 15) {
- regs->DCLRKV = RGB15_TO_COLORKEY(key);
- regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
+ iowrite32(RGB15_TO_COLORKEY(key), &regs->DCLRKV);
+ iowrite32(CLK_RGB15_MASK | DST_KEY_ENABLE,
+ &regs->DCLRKM);
} else {
- regs->DCLRKV = RGB16_TO_COLORKEY(key);
- regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
+ iowrite32(RGB16_TO_COLORKEY(key), &regs->DCLRKV);
+ iowrite32(CLK_RGB16_MASK | DST_KEY_ENABLE,
+ &regs->DCLRKM);
}
break;
case 24:
case 32:
- regs->DCLRKV = key;
- regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
+ iowrite32(key, &regs->DCLRKV);
+ iowrite32(CLK_RGB24_MASK | DST_KEY_ENABLE, &regs->DCLRKM);
break;
}
}
@@ -761,9 +773,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
struct put_image_params *params)
{
int ret, tmp_width;
- struct overlay_registers *regs;
+ struct overlay_registers __iomem *regs;
bool scale_changed = false;
struct drm_device *dev = overlay->dev;
+ u32 swidth, swidthsw, sheight, ostride;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
@@ -782,16 +795,18 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
goto out_unpin;
if (!overlay->active) {
+ u32 oconfig;
regs = intel_overlay_map_regs(overlay);
if (!regs) {
ret = -ENOMEM;
goto out_unpin;
}
- regs->OCONFIG = OCONF_CC_OUT_8BIT;
+ oconfig = OCONF_CC_OUT_8BIT;
if (IS_GEN4(overlay->dev))
- regs->OCONFIG |= OCONF_CSC_MODE_BT709;
- regs->OCONFIG |= overlay->crtc->pipe == 0 ?
+ oconfig |= OCONF_CSC_MODE_BT709;
+ oconfig |= overlay->crtc->pipe == 0 ?
OCONF_PIPE_A : OCONF_PIPE_B;
+ iowrite32(oconfig, &regs->OCONFIG);
intel_overlay_unmap_regs(overlay, regs);
ret = intel_overlay_on(overlay);
@@ -805,42 +820,46 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
goto out_unpin;
}
- regs->DWINPOS = (params->dst_y << 16) | params->dst_x;
- regs->DWINSZ = (params->dst_h << 16) | params->dst_w;
+ iowrite32((params->dst_y << 16) | params->dst_x, &regs->DWINPOS);
+ iowrite32((params->dst_h << 16) | params->dst_w, &regs->DWINSZ);
if (params->format & I915_OVERLAY_YUV_PACKED)
tmp_width = packed_width_bytes(params->format, params->src_w);
else
tmp_width = params->src_w;
- regs->SWIDTH = params->src_w;
- regs->SWIDTHSW = calc_swidthsw(overlay->dev,
- params->offset_Y, tmp_width);
- regs->SHEIGHT = params->src_h;
- regs->OBUF_0Y = new_bo->gtt_offset + params->offset_Y;
- regs->OSTRIDE = params->stride_Y;
+ swidth = params->src_w;
+ swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
+ sheight = params->src_h;
+ iowrite32(new_bo->gtt_offset + params->offset_Y, &regs->OBUF_0Y);
+ ostride = params->stride_Y;
if (params->format & I915_OVERLAY_YUV_PLANAR) {
int uv_hscale = uv_hsubsampling(params->format);
int uv_vscale = uv_vsubsampling(params->format);
u32 tmp_U, tmp_V;
- regs->SWIDTH |= (params->src_w/uv_hscale) << 16;
+ swidth |= (params->src_w/uv_hscale) << 16;
tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
params->src_w/uv_hscale);
tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
params->src_w/uv_hscale);
- regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
- regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
- regs->OBUF_0U = new_bo->gtt_offset + params->offset_U;
- regs->OBUF_0V = new_bo->gtt_offset + params->offset_V;
- regs->OSTRIDE |= params->stride_UV << 16;
+ swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
+ sheight |= (params->src_h/uv_vscale) << 16;
+ iowrite32(new_bo->gtt_offset + params->offset_U, &regs->OBUF_0U);
+ iowrite32(new_bo->gtt_offset + params->offset_V, &regs->OBUF_0V);
+ ostride |= params->stride_UV << 16;
}
+ iowrite32(swidth, &regs->SWIDTH);
+ iowrite32(swidthsw, &regs->SWIDTHSW);
+ iowrite32(sheight, &regs->SHEIGHT);
+ iowrite32(ostride, &regs->OSTRIDE);
+
scale_changed = update_scaling_factors(overlay, regs, params);
update_colorkey(overlay, regs);
- regs->OCMD = overlay_cmd_reg(params);
+ iowrite32(overlay_cmd_reg(params), &regs->OCMD);
intel_overlay_unmap_regs(overlay, regs);
@@ -860,7 +879,7 @@ out_unpin:
int intel_overlay_switch_off(struct intel_overlay *overlay)
{
- struct overlay_registers *regs;
+ struct overlay_registers __iomem *regs;
struct drm_device *dev = overlay->dev;
int ret;
@@ -879,7 +898,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
return ret;
regs = intel_overlay_map_regs(overlay);
- regs->OCMD = 0;
+ iowrite32(0, &regs->OCMD);
intel_overlay_unmap_regs(overlay, regs);
ret = intel_overlay_off(overlay);
@@ -1109,11 +1128,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
struct put_image_params *params;
int ret;
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
+ /* No need to check for DRIVER_MODESET - we don't set it up then. */
overlay = dev_priv->overlay;
if (!overlay) {
DRM_DEBUG("userspace bug: no overlay\n");
@@ -1250,10 +1265,11 @@ out_free:
}
static void update_reg_attrs(struct intel_overlay *overlay,
- struct overlay_registers *regs)
+ struct overlay_registers __iomem *regs)
{
- regs->OCLRC0 = (overlay->contrast << 18) | (overlay->brightness & 0xff);
- regs->OCLRC1 = overlay->saturation;
+ iowrite32((overlay->contrast << 18) | (overlay->brightness & 0xff),
+ &regs->OCLRC0);
+ iowrite32(overlay->saturation, &regs->OCLRC1);
}
static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
@@ -1306,14 +1322,10 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
struct drm_intel_overlay_attrs *attrs = data;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_overlay *overlay;
- struct overlay_registers *regs;
+ struct overlay_registers __iomem *regs;
int ret;
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
+ /* No need to check for DRIVER_MODESET - we don't set it up then. */
overlay = dev_priv->overlay;
if (!overlay) {
DRM_DEBUG("userspace bug: no overlay\n");
@@ -1396,7 +1408,7 @@ void intel_setup_overlay(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_overlay *overlay;
struct drm_i915_gem_object *reg_bo;
- struct overlay_registers *regs;
+ struct overlay_registers __iomem *regs;
int ret;
if (!HAS_OVERLAY(dev))
@@ -1451,7 +1463,7 @@ void intel_setup_overlay(struct drm_device *dev)
if (!regs)
goto out_unpin_bo;
- memset(regs, 0, sizeof(struct overlay_registers));
+ memset_io(regs, 0, sizeof(struct overlay_registers));
update_polyphase_filter(regs);
update_reg_attrs(overlay, regs);
@@ -1499,14 +1511,17 @@ struct intel_overlay_error_state {
u32 isr;
};
-static struct overlay_registers *
+static struct overlay_registers __iomem *
intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
{
drm_i915_private_t *dev_priv = overlay->dev->dev_private;
- struct overlay_registers *regs;
+ struct overlay_registers __iomem *regs;
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
- regs = overlay->reg_bo->phys_obj->handle->vaddr;
+ /* Cast to make sparse happy, but it's wc memory anyway, so
+ * equivalent to the wc io mapping on X86. */
+ regs = (struct overlay_registers __iomem *)
+ overlay->reg_bo->phys_obj->handle->vaddr;
else
regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
overlay->reg_bo->gtt_offset);
@@ -1515,7 +1530,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
}
static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
- struct overlay_registers *regs)
+ struct overlay_registers __iomem *regs)
{
if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
io_mapping_unmap_atomic(regs);
@@ -1540,9 +1555,9 @@ intel_overlay_capture_error_state(struct drm_device *dev)
error->dovsta = I915_READ(DOVSTA);
error->isr = I915_READ(ISR);
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
- error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr;
+ error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
else
- error->base = (long) overlay->reg_bo->gtt_offset;
+ error->base = overlay->reg_bo->gtt_offset;
regs = intel_overlay_map_regs_atomic(overlay);
if (!regs)
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 48177ec4720..2a1625d84a6 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -28,6 +28,9 @@
* Chris Wilson <chris@chris-wilson.co.uk>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/moduleparam.h>
#include "intel_drv.h"
#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
@@ -169,7 +172,7 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
/* XXX add code here to query mode clock or hardware clock
* and program max PWM appropriately.
*/
- printk_once(KERN_WARNING "fixme: max PWM is zero.\n");
+ pr_warn_once("fixme: max PWM is zero\n");
return 1;
}
@@ -189,6 +192,27 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
return max;
}
+static int i915_panel_invert_brightness;
+MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness "
+ "(-1 force normal, 0 machine defaults, 1 force inversion), please "
+ "report PCI device ID, subsystem vendor and subsystem device ID "
+ "to dri-devel@lists.freedesktop.org, if your machine needs it. "
+ "It will then be included in an upcoming module version.");
+module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
+static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (i915_panel_invert_brightness < 0)
+ return val;
+
+ if (i915_panel_invert_brightness > 0 ||
+ dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS)
+ return intel_panel_get_max_backlight(dev) - val;
+
+ return val;
+}
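
Worked example of the mapping above, assuming a hypothetical panel with a max backlight of 255: a level of 100 becomes 255 - 100 = 155 when inversion is active, and applying the mapping twice round-trips, which is why the get and set paths can share it.

	#include <assert.h>
	#include <stdint.h>

	static uint32_t invert(uint32_t max, uint32_t val)
	{
		return max - val;
	}

	int main(void)
	{
		const uint32_t max = 255;

		assert(invert(max, 100) == 155);
		assert(invert(max, invert(max, 100)) == 100); /* involution */
		return 0;
	}
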
+
u32 intel_panel_get_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -209,6 +233,7 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
}
}
+ val = intel_panel_compute_brightness(dev, val);
DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
return val;
}
@@ -226,6 +251,7 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
u32 tmp;
DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
+ level = intel_panel_compute_brightness(dev, level);
if (HAS_PCH_SPLIT(dev))
return intel_pch_panel_set_backlight(dev, level);
@@ -342,6 +368,7 @@ int intel_panel_setup_backlight(struct drm_device *dev)
else
return -ENODEV;
+ memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
props.max_brightness = intel_panel_get_max_backlight(dev);
dev_priv->backlight =
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
new file mode 100644
index 00000000000..8e79ff67ec9
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -0,0 +1,3796 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eugeni Dodonov <eugeni.dodonov@intel.com>
+ *
+ */
+
+#include <linux/cpufreq.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "../../../platform/x86/intel_ips.h"
+#include <linux/module.h>
+
+/* FBC, or Frame Buffer Compression, is a technique employed to compress the
+ * framebuffer contents in memory, aiming to reduce the required bandwidth
+ * during in-memory transfers and, therefore, the power consumed.
+ *
+ * The benefits of FBC are mostly visible with solid backgrounds and
+ * variation-less patterns.
+ *
+ * FBC-related functionality can be enabled by means of the
+ * i915.i915_enable_fbc parameter.
+ */
+
+static void i8xx_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 fbc_ctl;
+
+ /* Disable compression */
+ fbc_ctl = I915_READ(FBC_CONTROL);
+ if ((fbc_ctl & FBC_CTL_EN) == 0)
+ return;
+
+ fbc_ctl &= ~FBC_CTL_EN;
+ I915_WRITE(FBC_CONTROL, fbc_ctl);
+
+ /* Wait for compressing bit to clear */
+ if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
+ DRM_DEBUG_KMS("FBC idle timed out\n");
+ return;
+ }
+
+ DRM_DEBUG_KMS("disabled FBC\n");
+}
+
+static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int cfb_pitch;
+ int plane, i;
+ u32 fbc_ctl, fbc_ctl2;
+
+ cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
+ if (fb->pitches[0] < cfb_pitch)
+ cfb_pitch = fb->pitches[0];
+
+ /* FBC_CTL wants 64B units */
+ cfb_pitch = (cfb_pitch / 64) - 1;
+ plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
+
+ /* Clear old tags */
+ for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
+ I915_WRITE(FBC_TAG + (i * 4), 0);
+
+ /* Set it up... */
+ fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
+ fbc_ctl2 |= plane;
+ I915_WRITE(FBC_CONTROL2, fbc_ctl2);
+ I915_WRITE(FBC_FENCE_OFF, crtc->y);
+
+ /* enable it... */
+ fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
+ if (IS_I945GM(dev))
+ fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
+ fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
+ fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
+ fbc_ctl |= obj->fence_reg;
+ I915_WRITE(FBC_CONTROL, fbc_ctl);
+
+ DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
+ cfb_pitch, crtc->y, intel_crtc->plane);
+}
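
Worked example of the FBC_CTL stride encoding above (values illustrative): a CFB pitch of 4096 bytes is programmed as (4096 / 64) - 1 = 63, since the field counts 64-byte units minus one.

	#include <stdio.h>

	int main(void)
	{
		int cfb_pitch = 4096;			/* bytes */
		int field = (cfb_pitch / 64) - 1;	/* FBC_CTL wants 64B units */

		printf("stride field: %d\n", field);	/* 63 */
		return 0;
	}
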
+
+static bool i8xx_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
+}
+
+static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
+ unsigned long stall_watermark = 200;
+ u32 dpfc_ctl;
+
+ dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
+ dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
+ I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
+
+ I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
+ (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
+ (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
+ I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
+
+ /* enable it... */
+ I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
+
+ DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
+}
+
+static void g4x_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpfc_ctl;
+
+ /* Disable compression */
+ dpfc_ctl = I915_READ(DPFC_CONTROL);
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ dpfc_ctl &= ~DPFC_CTL_EN;
+ I915_WRITE(DPFC_CONTROL, dpfc_ctl);
+
+ DRM_DEBUG_KMS("disabled FBC\n");
+ }
+}
+
+static bool g4x_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
+}
+
+static void sandybridge_blit_fbc_update(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 blt_ecoskpd;
+
+ /* Make sure blitter notifies FBC of writes */
+ gen6_gt_force_wake_get(dev_priv);
+ blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
+ blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
+ GEN6_BLITTER_LOCK_SHIFT;
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
+ GEN6_BLITTER_LOCK_SHIFT);
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ POSTING_READ(GEN6_BLITTER_ECOSKPD);
+ gen6_gt_force_wake_put(dev_priv);
+}
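
The ECOSKPD sequence above follows i915's masked-register idiom: the high 16 bits of a write select which low bits may change. A sketch of the unlock/modify/re-lock pattern (the helper is hypothetical; the 16-bit shift mirrors GEN6_BLITTER_LOCK_SHIFT, and the mask semantics is an assumption stated here, not taken from the patch):

	static void masked_bit_set(struct drm_i915_private *dev_priv,
				   u32 reg, u32 bit)
	{
		u32 val = I915_READ(reg);

		val |= bit << 16;	/* unlock: mask bit enables the write */
		I915_WRITE(reg, val);
		val |= bit;		/* now the low bit actually sticks */
		I915_WRITE(reg, val);
		val &= ~(bit << 16);	/* drop the mask to re-lock the bit */
		I915_WRITE(reg, val);
	}
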
+
+static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
+ unsigned long stall_watermark = 200;
+ u32 dpfc_ctl;
+
+ dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+ dpfc_ctl &= DPFC_RESERVED;
+ dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
+ /* Set persistent mode for front-buffer rendering, ala X. */
+ dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
+ dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
+ I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
+
+ I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
+ (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
+ (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
+ I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
+ I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
+ /* enable it... */
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+
+ if (IS_GEN6(dev)) {
+ I915_WRITE(SNB_DPFC_CTL_SA,
+ SNB_CPU_FENCE_ENABLE | obj->fence_reg);
+ I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+ sandybridge_blit_fbc_update(dev);
+ }
+
+ DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
+}
+
+static void ironlake_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpfc_ctl;
+
+ /* Disable compression */
+ dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ dpfc_ctl &= ~DPFC_CTL_EN;
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
+
+ DRM_DEBUG_KMS("disabled FBC\n");
+ }
+}
+
+static bool ironlake_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
+}
+
+bool intel_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->display.fbc_enabled)
+ return false;
+
+ return dev_priv->display.fbc_enabled(dev);
+}
+
+static void intel_fbc_work_fn(struct work_struct *__work)
+{
+ struct intel_fbc_work *work =
+ container_of(to_delayed_work(__work),
+ struct intel_fbc_work, work);
+ struct drm_device *dev = work->crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev->struct_mutex);
+ if (work == dev_priv->fbc_work) {
+ /* Double check that we haven't switched fb without cancelling
+ * the prior work.
+ */
+ if (work->crtc->fb == work->fb) {
+ dev_priv->display.enable_fbc(work->crtc,
+ work->interval);
+
+ dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
+ dev_priv->cfb_fb = work->crtc->fb->base.id;
+ dev_priv->cfb_y = work->crtc->y;
+ }
+
+ dev_priv->fbc_work = NULL;
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ kfree(work);
+}
+
+static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
+{
+ if (dev_priv->fbc_work == NULL)
+ return;
+
+ DRM_DEBUG_KMS("cancelling pending FBC enable\n");
+
+ /* Synchronisation is provided by struct_mutex and checking of
+ * dev_priv->fbc_work, so we can perform the cancellation
+ * entirely asynchronously.
+ */
+ if (cancel_delayed_work(&dev_priv->fbc_work->work))
+ /* the work was cancelled before it ran; clean up */
+ kfree(dev_priv->fbc_work);
+
+ /* Mark the work as no longer wanted so that if it does
+ * wake up (because the work was already running and waiting
+ * for our mutex), it will discover that it is no longer
+ * necessary to run.
+ */
+ dev_priv->fbc_work = NULL;
+}
+
+void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct intel_fbc_work *work;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->display.enable_fbc)
+ return;
+
+ intel_cancel_fbc_work(dev_priv);
+
+ work = kzalloc(sizeof *work, GFP_KERNEL);
+ if (work == NULL) {
+ dev_priv->display.enable_fbc(crtc, interval);
+ return;
+ }
+
+ work->crtc = crtc;
+ work->fb = crtc->fb;
+ work->interval = interval;
+ INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
+
+ dev_priv->fbc_work = work;
+
+ DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
+
+	/* Delay the actual enabling to let pageflipping cease and the
+	 * display settle before starting the compression. Note that
+ * this delay also serves a second purpose: it allows for a
+ * vblank to pass after disabling the FBC before we attempt
+ * to modify the control registers.
+ *
+ * A more complicated solution would involve tracking vblanks
+ * following the termination of the page-flipping sequence
+ * and indeed performing the enable as a co-routine and not
+ * waiting synchronously upon the vblank.
+ */
+ schedule_delayed_work(&work->work, msecs_to_jiffies(50));
+}
+
+void intel_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ intel_cancel_fbc_work(dev_priv);
+
+ if (!dev_priv->display.disable_fbc)
+ return;
+
+ dev_priv->display.disable_fbc(dev);
+ dev_priv->cfb_plane = -1;
+}
+
+/**
+ * intel_update_fbc - enable/disable FBC as needed
+ * @dev: the drm_device
+ *
+ * Set up the framebuffer compression hardware at mode set time. We
+ * enable it if possible:
+ * - plane A only (on pre-965)
+ * - no pixel multiply/line duplication
+ * - no alpha buffer discard
+ * - no dual wide
+ * - framebuffer <= 2048 in width, 1536 in height
+ *
+ * We can't assume that any compression will take place (worst case),
+ * so the compressed buffer has to be the same size as the uncompressed
+ * one. It also must reside (along with the line length buffer) in
+ * stolen memory.
+ *
+ * We need to enable/disable FBC on a global basis.
+ */
+void intel_update_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = NULL, *tmp_crtc;
+ struct intel_crtc *intel_crtc;
+ struct drm_framebuffer *fb;
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj;
+ int enable_fbc;
+
+ DRM_DEBUG_KMS("\n");
+
+ if (!i915_powersave)
+ return;
+
+ if (!I915_HAS_FBC(dev))
+ return;
+
+ /*
+ * If FBC is already on, we just have to verify that we can
+ * keep it that way...
+ * Need to disable if:
+ * - more than one pipe is active
+ * - changing FBC params (stride, fence, mode)
+ * - new fb is too large to fit in compressed buffer
+ * - going to an unsupported config (interlace, pixel multiply, etc.)
+ */
+ list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
+ if (tmp_crtc->enabled && tmp_crtc->fb) {
+ if (crtc) {
+ DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
+ dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
+ goto out_disable;
+ }
+ crtc = tmp_crtc;
+ }
+ }
+
+ if (!crtc || crtc->fb == NULL) {
+ DRM_DEBUG_KMS("no output, disabling\n");
+ dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
+ goto out_disable;
+ }
+
+ intel_crtc = to_intel_crtc(crtc);
+ fb = crtc->fb;
+ intel_fb = to_intel_framebuffer(fb);
+ obj = intel_fb->obj;
+
+ enable_fbc = i915_enable_fbc;
+ if (enable_fbc < 0) {
+ DRM_DEBUG_KMS("fbc set to per-chip default\n");
+ enable_fbc = 1;
+ if (INTEL_INFO(dev)->gen <= 6)
+ enable_fbc = 0;
+ }
+ if (!enable_fbc) {
+ DRM_DEBUG_KMS("fbc disabled per module param\n");
+ dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
+ goto out_disable;
+ }
+ if (intel_fb->obj->base.size > dev_priv->cfb_size) {
+ DRM_DEBUG_KMS("framebuffer too large, disabling "
+ "compression\n");
+ dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+ goto out_disable;
+ }
+ if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
+ (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
+ DRM_DEBUG_KMS("mode incompatible with compression, "
+ "disabling\n");
+ dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
+ goto out_disable;
+ }
+ if ((crtc->mode.hdisplay > 2048) ||
+ (crtc->mode.vdisplay > 1536)) {
+ DRM_DEBUG_KMS("mode too large for compression, disabling\n");
+ dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
+ goto out_disable;
+ }
+ if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
+ DRM_DEBUG_KMS("plane not 0, disabling compression\n");
+ dev_priv->no_fbc_reason = FBC_BAD_PLANE;
+ goto out_disable;
+ }
+
+ /* The use of a CPU fence is mandatory in order to detect writes
+ * by the CPU to the scanout and trigger updates to the FBC.
+ */
+ if (obj->tiling_mode != I915_TILING_X ||
+ obj->fence_reg == I915_FENCE_REG_NONE) {
+ DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
+ dev_priv->no_fbc_reason = FBC_NOT_TILED;
+ goto out_disable;
+ }
+
+ /* If the kernel debugger is active, always disable compression */
+ if (in_dbg_master())
+ goto out_disable;
+
+ /* If the scanout has not changed, don't modify the FBC settings.
+ * Note that we make the fundamental assumption that the fb->obj
+ * cannot be unpinned (and have its GTT offset and fence revoked)
+ * without first being decoupled from the scanout and FBC disabled.
+ */
+ if (dev_priv->cfb_plane == intel_crtc->plane &&
+ dev_priv->cfb_fb == fb->base.id &&
+ dev_priv->cfb_y == crtc->y)
+ return;
+
+ if (intel_fbc_enabled(dev)) {
+ /* We update FBC along two paths, after changing fb/crtc
+ * configuration (modeswitching) and after page-flipping
+ * finishes. For the latter, we know that not only did
+ * we disable the FBC at the start of the page-flip
+ * sequence, but also more than one vblank has passed.
+ *
+ * For the former case of modeswitching, it is possible
+ * to switch between two FBC valid configurations
+ * instantaneously so we do need to disable the FBC
+ * before we can modify its control registers. We also
+ * have to wait for the next vblank for that to take
+ * effect. However, since we delay enabling FBC we can
+ * assume that a vblank has passed since disabling and
+ * that we can safely alter the registers in the deferred
+ * callback.
+ *
+ * In the scenario that we go from a valid to invalid
+ * and then back to valid FBC configuration we have
+ * no strict enforcement that a vblank occurred since
+ * disabling the FBC. However, along all current pipe
+ * disabling paths we do need to wait for a vblank at
+ * some point. And we wait before enabling FBC anyway.
+ */
+ DRM_DEBUG_KMS("disabling active FBC for update\n");
+ intel_disable_fbc(dev);
+ }
+
+ intel_enable_fbc(crtc, 500);
+ return;
+
+out_disable:
+ /* Multiple disables should be harmless */
+ if (intel_fbc_enabled(dev)) {
+ DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
+ intel_disable_fbc(dev);
+ }
+}
+
+static void i915_pineview_get_mem_freq(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 tmp;
+
+ tmp = I915_READ(CLKCFG);
+
+ switch (tmp & CLKCFG_FSB_MASK) {
+ case CLKCFG_FSB_533:
+ dev_priv->fsb_freq = 533; /* 133*4 */
+ break;
+ case CLKCFG_FSB_800:
+ dev_priv->fsb_freq = 800; /* 200*4 */
+ break;
+ case CLKCFG_FSB_667:
+ dev_priv->fsb_freq = 667; /* 167*4 */
+ break;
+ case CLKCFG_FSB_400:
+ dev_priv->fsb_freq = 400; /* 100*4 */
+ break;
+ }
+
+ switch (tmp & CLKCFG_MEM_MASK) {
+ case CLKCFG_MEM_533:
+ dev_priv->mem_freq = 533;
+ break;
+ case CLKCFG_MEM_667:
+ dev_priv->mem_freq = 667;
+ break;
+ case CLKCFG_MEM_800:
+ dev_priv->mem_freq = 800;
+ break;
+ }
+
+ /* detect pineview DDR3 setting */
+ tmp = I915_READ(CSHRDDR3CTL);
+ dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
+}
+
+static void i915_ironlake_get_mem_freq(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u16 ddrpll, csipll;
+
+ ddrpll = I915_READ16(DDRMPLL1);
+ csipll = I915_READ16(CSIPLL0);
+
+ switch (ddrpll & 0xff) {
+ case 0xc:
+ dev_priv->mem_freq = 800;
+ break;
+ case 0x10:
+ dev_priv->mem_freq = 1066;
+ break;
+ case 0x14:
+ dev_priv->mem_freq = 1333;
+ break;
+ case 0x18:
+ dev_priv->mem_freq = 1600;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
+ ddrpll & 0xff);
+ dev_priv->mem_freq = 0;
+ break;
+ }
+
+ dev_priv->r_t = dev_priv->mem_freq;
+
+ switch (csipll & 0x3ff) {
+ case 0x00c:
+ dev_priv->fsb_freq = 3200;
+ break;
+ case 0x00e:
+ dev_priv->fsb_freq = 3733;
+ break;
+ case 0x010:
+ dev_priv->fsb_freq = 4266;
+ break;
+ case 0x012:
+ dev_priv->fsb_freq = 4800;
+ break;
+ case 0x014:
+ dev_priv->fsb_freq = 5333;
+ break;
+ case 0x016:
+ dev_priv->fsb_freq = 5866;
+ break;
+ case 0x018:
+ dev_priv->fsb_freq = 6400;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
+ csipll & 0x3ff);
+ dev_priv->fsb_freq = 0;
+ break;
+ }
+
+ if (dev_priv->fsb_freq == 3200) {
+ dev_priv->c_m = 0;
+ } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
+ dev_priv->c_m = 1;
+ } else {
+ dev_priv->c_m = 2;
+ }
+}
+
+static const struct cxsr_latency cxsr_latency_table[] = {
+ {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
+ {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
+ {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
+ {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
+ {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
+
+ {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
+ {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
+ {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
+ {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
+ {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
+
+ {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
+ {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
+ {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
+ {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
+ {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
+
+ {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
+ {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
+ {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
+ {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
+ {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
+
+ {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
+ {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
+ {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
+ {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
+ {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
+
+ {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
+ {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
+ {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
+ {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
+ {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
+};
+
+static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
+ int is_ddr3,
+ int fsb,
+ int mem)
+{
+ const struct cxsr_latency *latency;
+ int i;
+
+ if (fsb == 0 || mem == 0)
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
+ latency = &cxsr_latency_table[i];
+ if (is_desktop == latency->is_desktop &&
+ is_ddr3 == latency->is_ddr3 &&
+ fsb == latency->fsb_freq && mem == latency->mem_freq)
+ return latency;
+ }
+
+ DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+
+ return NULL;
+}
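+
+/*
+ * Illustrative lookup against the table above (not a code path in this
+ * file, and the numbers are hypothetical): a mobile part (is_desktop == 0)
+ * with DDR3 and fsb_freq 800 / mem_freq 667 matches the
+ * {0, 1, 800, 667, ...} row, so a caller would see its self-refresh
+ * latency like this:
+ *
+ *	latency = intel_get_cxsr_latency(0, 1, 800, 667);
+ *	if (latency)
+ *		use(latency->display_sr);
+ */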
+
+static void pineview_disable_cxsr(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* deactivate cxsr */
+ I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
+}
+
+/*
+ * Latency for FIFO fetches is dependent on several factors:
+ * - memory configuration (speed, channels)
+ * - chipset
+ * - current MCH state
+ * It can be fairly high in some situations, so here we assume a fairly
+ * pessimal value. It's a tradeoff between extra memory fetches (if we
+ * set this value too high, the FIFO will fetch frequently to stay full)
+ * and power consumption (set it too low to save power and we might see
+ * FIFO underruns and display "flicker").
+ *
+ * A value of 5us seems to be a good balance; safe for very low end
+ * platforms but not overly aggressive on lower latency configs.
+ */
+static const int latency_ns = 5000;
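+
+/*
+ * Worked example (illustrative numbers): with a 100 MHz dot clock and
+ * 4 bytes per pixel the display drains 400 bytes/us, so a 5000 ns fetch
+ * latency costs (100000 / 1000) * 4 * 5000 / 1000 = 2000 bytes of FIFO --
+ * the margin the watermark calculations below must reserve.
+ */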
+
+static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ size = dsparb & 0x7f;
+ if (plane)
+ size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
+
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A", size);
+
+ return size;
+}
+
+static int i85x_get_fifo_size(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ size = dsparb & 0x1ff;
+ if (plane)
+ size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
+ size >>= 1; /* Convert to cachelines */
+
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A", size);
+
+ return size;
+}
+
+static int i845_get_fifo_size(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ size = dsparb & 0x7f;
+ size >>= 2; /* Convert to cachelines */
+
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A",
+ size);
+
+ return size;
+}
+
+static int i830_get_fifo_size(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ size = dsparb & 0x7f;
+ size >>= 1; /* Convert to cachelines */
+
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A", size);
+
+ return size;
+}
+
+/* Pineview has different values for various configs */
+static const struct intel_watermark_params pineview_display_wm = {
+ PINEVIEW_DISPLAY_FIFO,
+ PINEVIEW_MAX_WM,
+ PINEVIEW_DFT_WM,
+ PINEVIEW_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params pineview_display_hplloff_wm = {
+ PINEVIEW_DISPLAY_FIFO,
+ PINEVIEW_MAX_WM,
+ PINEVIEW_DFT_HPLLOFF_WM,
+ PINEVIEW_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params pineview_cursor_wm = {
+ PINEVIEW_CURSOR_FIFO,
+ PINEVIEW_CURSOR_MAX_WM,
+ PINEVIEW_CURSOR_DFT_WM,
+ PINEVIEW_CURSOR_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
+ PINEVIEW_CURSOR_FIFO,
+ PINEVIEW_CURSOR_MAX_WM,
+ PINEVIEW_CURSOR_DFT_WM,
+ PINEVIEW_CURSOR_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params g4x_wm_info = {
+ G4X_FIFO_SIZE,
+ G4X_MAX_WM,
+ G4X_MAX_WM,
+ 2,
+ G4X_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params g4x_cursor_wm_info = {
+ I965_CURSOR_FIFO,
+ I965_CURSOR_MAX_WM,
+ I965_CURSOR_DFT_WM,
+ 2,
+ G4X_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params valleyview_wm_info = {
+ VALLEYVIEW_FIFO_SIZE,
+ VALLEYVIEW_MAX_WM,
+ VALLEYVIEW_MAX_WM,
+ 2,
+ G4X_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params valleyview_cursor_wm_info = {
+ I965_CURSOR_FIFO,
+ VALLEYVIEW_CURSOR_MAX_WM,
+ I965_CURSOR_DFT_WM,
+ 2,
+ G4X_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params i965_cursor_wm_info = {
+ I965_CURSOR_FIFO,
+ I965_CURSOR_MAX_WM,
+ I965_CURSOR_DFT_WM,
+ 2,
+ I915_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params i945_wm_info = {
+ I945_FIFO_SIZE,
+ I915_MAX_WM,
+ 1,
+ 2,
+ I915_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params i915_wm_info = {
+ I915_FIFO_SIZE,
+ I915_MAX_WM,
+ 1,
+ 2,
+ I915_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params i855_wm_info = {
+ I855GM_FIFO_SIZE,
+ I915_MAX_WM,
+ 1,
+ 2,
+ I830_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params i830_wm_info = {
+ I830_FIFO_SIZE,
+ I915_MAX_WM,
+ 1,
+ 2,
+ I830_FIFO_LINE_SIZE
+};
+
+static const struct intel_watermark_params ironlake_display_wm_info = {
+ ILK_DISPLAY_FIFO,
+ ILK_DISPLAY_MAXWM,
+ ILK_DISPLAY_DFTWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params ironlake_cursor_wm_info = {
+ ILK_CURSOR_FIFO,
+ ILK_CURSOR_MAXWM,
+ ILK_CURSOR_DFTWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params ironlake_display_srwm_info = {
+ ILK_DISPLAY_SR_FIFO,
+ ILK_DISPLAY_MAX_SRWM,
+ ILK_DISPLAY_DFT_SRWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params ironlake_cursor_srwm_info = {
+ ILK_CURSOR_SR_FIFO,
+ ILK_CURSOR_MAX_SRWM,
+ ILK_CURSOR_DFT_SRWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+
+static const struct intel_watermark_params sandybridge_display_wm_info = {
+ SNB_DISPLAY_FIFO,
+ SNB_DISPLAY_MAXWM,
+ SNB_DISPLAY_DFTWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params sandybridge_cursor_wm_info = {
+ SNB_CURSOR_FIFO,
+ SNB_CURSOR_MAXWM,
+ SNB_CURSOR_DFTWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params sandybridge_display_srwm_info = {
+ SNB_DISPLAY_SR_FIFO,
+ SNB_DISPLAY_MAX_SRWM,
+ SNB_DISPLAY_DFT_SRWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
+ SNB_CURSOR_SR_FIFO,
+ SNB_CURSOR_MAX_SRWM,
+ SNB_CURSOR_DFT_SRWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+
+
+/**
+ * intel_calculate_wm - calculate watermark level
+ * @clock_in_khz: pixel clock
+ * @wm: chip FIFO params
+ * @fifo_size: size of the display FIFO, in cacheline-sized entries
+ * @pixel_size: display pixel size
+ * @latency_ns: memory latency for the platform
+ *
+ * Calculate the watermark level (the level at which the display plane will
+ * start fetching from memory again). Each chip has a different display
+ * FIFO size and allocation, so the caller needs to figure that out and pass
+ * in the correct intel_watermark_params structure.
+ *
+ * As the pixel clock runs, the FIFO will be drained at a rate that depends
+ * on the pixel size. When it reaches the watermark level, it'll start
+ * fetching FIFO-line-sized chunks from memory until the FIFO fills
+ * past the watermark point. If the FIFO drains completely, a FIFO underrun
+ * will occur, and a display engine hang could result.
+ */
+static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
+ const struct intel_watermark_params *wm,
+ int fifo_size,
+ int pixel_size,
+ unsigned long latency_ns)
+{
+ long entries_required, wm_size;
+
+ /*
+ * Note: we need to make sure we don't overflow for various clock &
+ * latency values.
+ * clocks go from a few thousand to several hundred thousand.
+ * latency is usually a few thousand
+ */
+ entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
+ 1000;
+ entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
+
+ DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
+
+ wm_size = fifo_size - (entries_required + wm->guard_size);
+
+ DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
+
+ /* Don't promote wm_size to unsigned... */
+ if (wm_size > (long)wm->max_wm)
+ wm_size = wm->max_wm;
+ if (wm_size <= 0)
+ wm_size = wm->default_wm;
+ return wm_size;
+}
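+
+/*
+ * A minimal worked example of the function above, with hypothetical
+ * numbers: at clock_in_khz = 100000, pixel_size = 4 and the default
+ * latency_ns = 5000, entries_required = 100 * 4 * 5000 / 1000 = 2000
+ * bytes = 32 cachelines of 64 bytes. With a hypothetical 96-entry FIFO
+ * and a guard size of 2, wm_size = 96 - (32 + 2) = 62, which is then
+ * clamped against wm->max_wm.
+ */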
+
+static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
+{
+ struct drm_crtc *crtc, *enabled = NULL;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc->enabled && crtc->fb) {
+ if (enabled)
+ return NULL;
+ enabled = crtc;
+ }
+ }
+
+ return enabled;
+}
+
+static void pineview_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ const struct cxsr_latency *latency;
+ u32 reg;
+ unsigned long wm;
+
+ latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
+ dev_priv->fsb_freq, dev_priv->mem_freq);
+ if (!latency) {
+ DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+ pineview_disable_cxsr(dev);
+ return;
+ }
+
+ crtc = single_enabled_crtc(dev);
+ if (crtc) {
+ int clock = crtc->mode.clock;
+ int pixel_size = crtc->fb->bits_per_pixel / 8;
+
+ /* Display SR */
+ wm = intel_calculate_wm(clock, &pineview_display_wm,
+ pineview_display_wm.fifo_size,
+ pixel_size, latency->display_sr);
+ reg = I915_READ(DSPFW1);
+ reg &= ~DSPFW_SR_MASK;
+ reg |= wm << DSPFW_SR_SHIFT;
+ I915_WRITE(DSPFW1, reg);
+ DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
+
+ /* cursor SR */
+ wm = intel_calculate_wm(clock, &pineview_cursor_wm,
+ pineview_display_wm.fifo_size,
+ pixel_size, latency->cursor_sr);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_CURSOR_SR_MASK;
+ reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
+ I915_WRITE(DSPFW3, reg);
+
+ /* Display HPLL off SR */
+ wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
+ pineview_display_hplloff_wm.fifo_size,
+ pixel_size, latency->display_hpll_disable);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_HPLL_SR_MASK;
+ reg |= wm & DSPFW_HPLL_SR_MASK;
+ I915_WRITE(DSPFW3, reg);
+
+ /* cursor HPLL off SR */
+ wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
+ pineview_display_hplloff_wm.fifo_size,
+ pixel_size, latency->cursor_hpll_disable);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_HPLL_CURSOR_MASK;
+ reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
+ I915_WRITE(DSPFW3, reg);
+ DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
+
+ /* activate cxsr */
+ I915_WRITE(DSPFW3,
+ I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
+ DRM_DEBUG_KMS("Self-refresh is enabled\n");
+ } else {
+ pineview_disable_cxsr(dev);
+ DRM_DEBUG_KMS("Self-refresh is disabled\n");
+ }
+}
+
+static bool g4x_compute_wm0(struct drm_device *dev,
+ int plane,
+ const struct intel_watermark_params *display,
+ int display_latency_ns,
+ const struct intel_watermark_params *cursor,
+ int cursor_latency_ns,
+ int *plane_wm,
+ int *cursor_wm)
+{
+ struct drm_crtc *crtc;
+ int htotal, hdisplay, clock, pixel_size;
+ int line_time_us, line_count;
+ int entries, tlb_miss;
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ if (crtc->fb == NULL || !crtc->enabled) {
+ *cursor_wm = cursor->guard_size;
+ *plane_wm = display->guard_size;
+ return false;
+ }
+
+ htotal = crtc->mode.htotal;
+ hdisplay = crtc->mode.hdisplay;
+ clock = crtc->mode.clock;
+ pixel_size = crtc->fb->bits_per_pixel / 8;
+
+ /* Use the small buffer method to calculate plane watermark */
+ entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
+ tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
+ if (tlb_miss > 0)
+ entries += tlb_miss;
+ entries = DIV_ROUND_UP(entries, display->cacheline_size);
+ *plane_wm = entries + display->guard_size;
+ if (*plane_wm > (int)display->max_wm)
+ *plane_wm = display->max_wm;
+
+ /* Use the large buffer method to calculate cursor watermark */
+ line_time_us = ((htotal * 1000) / clock);
+ line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
+ entries = line_count * 64 * pixel_size;
+ tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
+ if (tlb_miss > 0)
+ entries += tlb_miss;
+ entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+ *cursor_wm = entries + cursor->guard_size;
+ if (*cursor_wm > (int)cursor->max_wm)
+ *cursor_wm = (int)cursor->max_wm;
+
+ return true;
+}
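+
+/*
+ * Illustrative numbers for the two methods above: with a 100 MHz clock
+ * and 4 bpp, the "small buffer" plane estimate for a 200 ns latency is
+ * (100000 * 4 / 1000) * 200 / 1000 = 80 bytes (plus any TLB-miss
+ * correction), while the cursor's "large buffer" estimate charges whole
+ * 64-pixel cursor lines: line_count * 64 * 4 bytes.
+ */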
+
+/*
+ * Check the wm result.
+ *
+ * If any calculated watermark value is larger than the maximum value that
+ * can be programmed into the associated watermark register, that watermark
+ * must be disabled.
+ */
+static bool g4x_check_srwm(struct drm_device *dev,
+ int display_wm, int cursor_wm,
+ const struct intel_watermark_params *display,
+ const struct intel_watermark_params *cursor)
+{
+ DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
+ display_wm, cursor_wm);
+
+ if (display_wm > display->max_wm) {
+ DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
+ display_wm, display->max_wm);
+ return false;
+ }
+
+ if (cursor_wm > cursor->max_wm) {
+ DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
+ cursor_wm, cursor->max_wm);
+ return false;
+ }
+
+ if (!(display_wm || cursor_wm)) {
+ DRM_DEBUG_KMS("SR latency is 0, disabling\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool g4x_compute_srwm(struct drm_device *dev,
+ int plane,
+ int latency_ns,
+ const struct intel_watermark_params *display,
+ const struct intel_watermark_params *cursor,
+ int *display_wm, int *cursor_wm)
+{
+ struct drm_crtc *crtc;
+ int hdisplay, htotal, pixel_size, clock;
+ unsigned long line_time_us;
+ int line_count, line_size;
+ int small, large;
+ int entries;
+
+ if (!latency_ns) {
+ *display_wm = *cursor_wm = 0;
+ return false;
+ }
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ hdisplay = crtc->mode.hdisplay;
+ htotal = crtc->mode.htotal;
+ clock = crtc->mode.clock;
+ pixel_size = crtc->fb->bits_per_pixel / 8;
+
+ line_time_us = (htotal * 1000) / clock;
+ line_count = (latency_ns / line_time_us + 1000) / 1000;
+ line_size = hdisplay * pixel_size;
+
+ /* Use the minimum of the small and large buffer method for primary */
+ small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+ large = line_count * line_size;
+
+ entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
+ *display_wm = entries + display->guard_size;
+
+ /* calculate the self-refresh watermark for display cursor */
+ entries = line_count * pixel_size * 64;
+ entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+ *cursor_wm = entries + cursor->guard_size;
+
+ return g4x_check_srwm(dev,
+ *display_wm, *cursor_wm,
+ display, cursor);
+}
+
+static bool vlv_compute_drain_latency(struct drm_device *dev,
+ int plane,
+ int *plane_prec_mult,
+ int *plane_dl,
+ int *cursor_prec_mult,
+ int *cursor_dl)
+{
+ struct drm_crtc *crtc;
+ int clock, pixel_size;
+ int entries;
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ if (crtc->fb == NULL || !crtc->enabled)
+ return false;
+
+ clock = crtc->mode.clock; /* VESA DOT Clock */
+ pixel_size = crtc->fb->bits_per_pixel / 8; /* BPP */
+
+ entries = (clock / 1000) * pixel_size;
+ *plane_prec_mult = (entries > 256) ?
+ DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
+ *plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
+ pixel_size);
+
+ entries = (clock / 1000) * 4; /* BPP is always 4 for cursor */
+ *cursor_prec_mult = (entries > 256) ?
+ DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
+ *cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);
+
+ return true;
+}
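+
+/*
+ * Worked example for the drain-latency math above (illustrative
+ * numbers): a 200 MHz dot clock at 4 bpp gives entries = 200 * 4 =
+ * 800 > 256, so the 32x precision multiplier is selected and
+ * plane_dl = (64 * 32 * 4) / (200 * 4) = 10.
+ */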
+
+/*
+ * Update drain latency registers of memory arbiter
+ *
+ * Valleyview SoC has a new memory arbiter and needs drain latency registers
+ * to be programmed. Each plane has a drain latency multiplier and a drain
+ * latency value.
+ */
+
+static void vlv_update_drain_latency(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int planea_prec, planea_dl, planeb_prec, planeb_dl;
+ int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
+ int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
+ either 16 or 32 */
+
+ /* For plane A, Cursor A */
+ if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
+ &cursor_prec_mult, &cursora_dl)) {
+ cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
+ DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
+ planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
+ DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;
+
+ I915_WRITE(VLV_DDL1, cursora_prec |
+ (cursora_dl << DDL_CURSORA_SHIFT) |
+ planea_prec | planea_dl);
+ }
+
+ /* For plane B, Cursor B */
+ if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
+ &cursor_prec_mult, &cursorb_dl)) {
+ cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
+ DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
+ planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
+ DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;
+
+ I915_WRITE(VLV_DDL2, cursorb_prec |
+ (cursorb_dl << DDL_CURSORB_SHIFT) |
+ planeb_prec | planeb_dl);
+ }
+}
+
+#define single_plane_enabled(mask) is_power_of_2(mask)
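+
+/*
+ * "enabled" in the update_wm functions below is a bitmask with one bit
+ * per plane (bit 0 = plane A, bit 1 = plane B, ...): exactly one enabled
+ * plane means the mask is a power of two, and ffs(enabled) - 1 then
+ * recovers the index of that plane.
+ */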
+
+static void valleyview_update_wm(struct drm_device *dev)
+{
+ static const int sr_latency_ns = 12000;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
+ int plane_sr, cursor_sr;
+ unsigned int enabled = 0;
+
+ vlv_update_drain_latency(dev);
+
+ if (g4x_compute_wm0(dev, 0,
+ &valleyview_wm_info, latency_ns,
+ &valleyview_cursor_wm_info, latency_ns,
+ &planea_wm, &cursora_wm))
+ enabled |= 1;
+
+ if (g4x_compute_wm0(dev, 1,
+ &valleyview_wm_info, latency_ns,
+ &valleyview_cursor_wm_info, latency_ns,
+ &planeb_wm, &cursorb_wm))
+ enabled |= 2;
+
+ plane_sr = cursor_sr = 0;
+ if (single_plane_enabled(enabled) &&
+ g4x_compute_srwm(dev, ffs(enabled) - 1,
+ sr_latency_ns,
+ &valleyview_wm_info,
+ &valleyview_cursor_wm_info,
+ &plane_sr, &cursor_sr))
+ I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
+ else
+ I915_WRITE(FW_BLC_SELF_VLV,
+ I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
+ planea_wm, cursora_wm,
+ planeb_wm, cursorb_wm,
+ plane_sr, cursor_sr);
+
+ I915_WRITE(DSPFW1,
+ (plane_sr << DSPFW_SR_SHIFT) |
+ (cursorb_wm << DSPFW_CURSORB_SHIFT) |
+ (planeb_wm << DSPFW_PLANEB_SHIFT) |
+ planea_wm);
+ I915_WRITE(DSPFW2,
+ (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
+ (cursora_wm << DSPFW_CURSORA_SHIFT));
+ I915_WRITE(DSPFW3,
+ (I915_READ(DSPFW3) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)));
+}
+
+static void g4x_update_wm(struct drm_device *dev)
+{
+ static const int sr_latency_ns = 12000;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
+ int plane_sr, cursor_sr;
+ unsigned int enabled = 0;
+
+ if (g4x_compute_wm0(dev, 0,
+ &g4x_wm_info, latency_ns,
+ &g4x_cursor_wm_info, latency_ns,
+ &planea_wm, &cursora_wm))
+ enabled |= 1;
+
+ if (g4x_compute_wm0(dev, 1,
+ &g4x_wm_info, latency_ns,
+ &g4x_cursor_wm_info, latency_ns,
+ &planeb_wm, &cursorb_wm))
+ enabled |= 2;
+
+ plane_sr = cursor_sr = 0;
+ if (single_plane_enabled(enabled) &&
+ g4x_compute_srwm(dev, ffs(enabled) - 1,
+ sr_latency_ns,
+ &g4x_wm_info,
+ &g4x_cursor_wm_info,
+ &plane_sr, &cursor_sr))
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ else
+ I915_WRITE(FW_BLC_SELF,
+ I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
+ planea_wm, cursora_wm,
+ planeb_wm, cursorb_wm,
+ plane_sr, cursor_sr);
+
+ I915_WRITE(DSPFW1,
+ (plane_sr << DSPFW_SR_SHIFT) |
+ (cursorb_wm << DSPFW_CURSORB_SHIFT) |
+ (planeb_wm << DSPFW_PLANEB_SHIFT) |
+ planea_wm);
+ I915_WRITE(DSPFW2,
+ (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
+ (cursora_wm << DSPFW_CURSORA_SHIFT));
+ /* HPLL off in SR has some issues on G4x... disable it */
+ I915_WRITE(DSPFW3,
+ (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
+ (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+}
+
+static void i965_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ int srwm = 1;
+ int cursor_sr = 16;
+
+ /* Calc sr entries for one plane configs */
+ crtc = single_enabled_crtc(dev);
+ if (crtc) {
+ /* self-refresh has much higher latency */
+ static const int sr_latency_ns = 12000;
+ int clock = crtc->mode.clock;
+ int htotal = crtc->mode.htotal;
+ int hdisplay = crtc->mode.hdisplay;
+ int pixel_size = crtc->fb->bits_per_pixel / 8;
+ unsigned long line_time_us;
+ int entries;
+
+ line_time_us = ((htotal * 1000) / clock);
+
+ /* Use ns/us then divide to preserve precision */
+ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ pixel_size * hdisplay;
+ entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
+ srwm = I965_FIFO_SIZE - entries;
+ if (srwm < 0)
+ srwm = 1;
+ srwm &= 0x1ff;
+ DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
+ entries, srwm);
+
+ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ pixel_size * 64;
+ entries = DIV_ROUND_UP(entries,
+ i965_cursor_wm_info.cacheline_size);
+ cursor_sr = i965_cursor_wm_info.fifo_size -
+ (entries + i965_cursor_wm_info.guard_size);
+
+ if (cursor_sr > i965_cursor_wm_info.max_wm)
+ cursor_sr = i965_cursor_wm_info.max_wm;
+
+ DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
+ "cursor %d\n", srwm, cursor_sr);
+
+ if (IS_CRESTLINE(dev))
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ } else {
+ /* Turn off self refresh if both pipes are enabled */
+ if (IS_CRESTLINE(dev))
+ I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+ & ~FW_BLC_SELF_EN);
+ }
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
+ srwm);
+
+ /* 965 has limitations... */
+ I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
+ (8 << 16) | (8 << 8) | (8 << 0));
+ I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
+ /* update cursor SR watermark */
+ I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+}
+
+static void i9xx_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct intel_watermark_params *wm_info;
+ uint32_t fwater_lo;
+ uint32_t fwater_hi;
+ int cwm, srwm = 1;
+ int fifo_size;
+ int planea_wm, planeb_wm;
+ struct drm_crtc *crtc, *enabled = NULL;
+
+ if (IS_I945GM(dev))
+ wm_info = &i945_wm_info;
+ else if (!IS_GEN2(dev))
+ wm_info = &i915_wm_info;
+ else
+ wm_info = &i855_wm_info;
+
+ fifo_size = dev_priv->display.get_fifo_size(dev, 0);
+ crtc = intel_get_crtc_for_plane(dev, 0);
+ if (crtc->enabled && crtc->fb) {
+ planea_wm = intel_calculate_wm(crtc->mode.clock,
+ wm_info, fifo_size,
+ crtc->fb->bits_per_pixel / 8,
+ latency_ns);
+ enabled = crtc;
+ } else
+ planea_wm = fifo_size - wm_info->guard_size;
+
+ fifo_size = dev_priv->display.get_fifo_size(dev, 1);
+ crtc = intel_get_crtc_for_plane(dev, 1);
+ if (crtc->enabled && crtc->fb) {
+ planeb_wm = intel_calculate_wm(crtc->mode.clock,
+ wm_info, fifo_size,
+ crtc->fb->bits_per_pixel / 8,
+ latency_ns);
+ if (enabled == NULL)
+ enabled = crtc;
+ else
+ enabled = NULL;
+ } else
+ planeb_wm = fifo_size - wm_info->guard_size;
+
+ DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
+
+ /*
+ * Overlay gets an aggressive default since video jitter is bad.
+ */
+ cwm = 2;
+
+ /* Play safe and disable self-refresh before adjusting watermarks. */
+ if (IS_I945G(dev) || IS_I945GM(dev))
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
+ else if (IS_I915GM(dev))
+ I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
+
+ /* Calc sr entries for one plane configs */
+ if (HAS_FW_BLC(dev) && enabled) {
+ /* self-refresh has much higher latency */
+ static const int sr_latency_ns = 6000;
+ int clock = enabled->mode.clock;
+ int htotal = enabled->mode.htotal;
+ int hdisplay = enabled->mode.hdisplay;
+ int pixel_size = enabled->fb->bits_per_pixel / 8;
+ unsigned long line_time_us;
+ int entries;
+
+ line_time_us = (htotal * 1000) / clock;
+
+ /* Use ns/us then divide to preserve precision */
+ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ pixel_size * hdisplay;
+ entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
+ DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
+ srwm = wm_info->fifo_size - entries;
+ if (srwm < 0)
+ srwm = 1;
+
+ if (IS_I945G(dev) || IS_I945GM(dev))
+ I915_WRITE(FW_BLC_SELF,
+ FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
+ else if (IS_I915GM(dev))
+ I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
+ }
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
+ planea_wm, planeb_wm, cwm, srwm);
+
+ fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
+ fwater_hi = (cwm & 0x1f);
+
+ /* Set request length to 8 cachelines per fetch */
+ fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
+ fwater_hi = fwater_hi | (1 << 8);
+
+ I915_WRITE(FW_BLC, fwater_lo);
+ I915_WRITE(FW_BLC2, fwater_hi);
+
+ if (HAS_FW_BLC(dev)) {
+ if (enabled) {
+ if (IS_I945G(dev) || IS_I945GM(dev))
+ I915_WRITE(FW_BLC_SELF,
+ FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
+ else if (IS_I915GM(dev))
+ I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
+ DRM_DEBUG_KMS("memory self refresh enabled\n");
+ } else
+ DRM_DEBUG_KMS("memory self refresh disabled\n");
+ }
+}
+
+static void i830_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ uint32_t fwater_lo;
+ int planea_wm;
+
+ crtc = single_enabled_crtc(dev);
+ if (crtc == NULL)
+ return;
+
+ planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
+ dev_priv->display.get_fifo_size(dev, 0),
+ crtc->fb->bits_per_pixel / 8,
+ latency_ns);
+ fwater_lo = I915_READ(FW_BLC) & ~0xfff;
+ fwater_lo |= (3<<8) | planea_wm;
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
+
+ I915_WRITE(FW_BLC, fwater_lo);
+}
+
+#define ILK_LP0_PLANE_LATENCY 700
+#define ILK_LP0_CURSOR_LATENCY 1300
+
+/*
+ * Check the wm result.
+ *
+ * If any calculated watermark value is larger than the maximum value that
+ * can be programmed into the associated watermark register, that watermark
+ * must be disabled.
+ */
+static bool ironlake_check_srwm(struct drm_device *dev, int level,
+ int fbc_wm, int display_wm, int cursor_wm,
+ const struct intel_watermark_params *display,
+ const struct intel_watermark_params *cursor)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
+ " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
+
+ if (fbc_wm > SNB_FBC_MAX_SRWM) {
+ DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
+ fbc_wm, SNB_FBC_MAX_SRWM, level);
+
+		/* FBC has its own way to disable the FBC WM */
+ I915_WRITE(DISP_ARB_CTL,
+ I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
+ return false;
+ }
+
+ if (display_wm > display->max_wm) {
+ DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
+ display_wm, SNB_DISPLAY_MAX_SRWM, level);
+ return false;
+ }
+
+ if (cursor_wm > cursor->max_wm) {
+ DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
+ cursor_wm, SNB_CURSOR_MAX_SRWM, level);
+ return false;
+ }
+
+ if (!(fbc_wm || display_wm || cursor_wm)) {
+ DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Compute watermark values of WM[1-3],
+ */
+static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
+ int latency_ns,
+ const struct intel_watermark_params *display,
+ const struct intel_watermark_params *cursor,
+ int *fbc_wm, int *display_wm, int *cursor_wm)
+{
+ struct drm_crtc *crtc;
+ unsigned long line_time_us;
+ int hdisplay, htotal, pixel_size, clock;
+ int line_count, line_size;
+ int small, large;
+ int entries;
+
+ if (!latency_ns) {
+ *fbc_wm = *display_wm = *cursor_wm = 0;
+ return false;
+ }
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ hdisplay = crtc->mode.hdisplay;
+ htotal = crtc->mode.htotal;
+ clock = crtc->mode.clock;
+ pixel_size = crtc->fb->bits_per_pixel / 8;
+
+ line_time_us = (htotal * 1000) / clock;
+ line_count = (latency_ns / line_time_us + 1000) / 1000;
+ line_size = hdisplay * pixel_size;
+
+ /* Use the minimum of the small and large buffer method for primary */
+ small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+ large = line_count * line_size;
+
+ entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
+ *display_wm = entries + display->guard_size;
+
+ /*
+ * Spec says:
+ * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
+ */
+ *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
+
+ /* calculate the self-refresh watermark for display cursor */
+ entries = line_count * pixel_size * 64;
+ entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+ *cursor_wm = entries + cursor->guard_size;
+
+ return ironlake_check_srwm(dev, level,
+ *fbc_wm, *display_wm, *cursor_wm,
+ display, cursor);
+}
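+
+/*
+ * Example of the FBC WM rule quoted above (illustrative numbers): a
+ * 1920-wide scanout at 4 bpp has line_size = 7680 bytes, so a primary
+ * watermark of 128 entries gives
+ * fbc_wm = DIV_ROUND_UP(128 * 64, 7680) + 2 = 2 + 2 = 4 FBC lines.
+ */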
+
+static void ironlake_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int fbc_wm, plane_wm, cursor_wm;
+ unsigned int enabled;
+
+ enabled = 0;
+ if (g4x_compute_wm0(dev, 0,
+ &ironlake_display_wm_info,
+ ILK_LP0_PLANE_LATENCY,
+ &ironlake_cursor_wm_info,
+ ILK_LP0_CURSOR_LATENCY,
+ &plane_wm, &cursor_wm)) {
+ I915_WRITE(WM0_PIPEA_ILK,
+ (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+ DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
+ " plane %d, " "cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 1;
+ }
+
+ if (g4x_compute_wm0(dev, 1,
+ &ironlake_display_wm_info,
+ ILK_LP0_PLANE_LATENCY,
+ &ironlake_cursor_wm_info,
+ ILK_LP0_CURSOR_LATENCY,
+ &plane_wm, &cursor_wm)) {
+ I915_WRITE(WM0_PIPEB_ILK,
+ (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+ DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
+ " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 2;
+ }
+
+ /*
+ * Calculate and update the self-refresh watermark only when one
+ * display plane is used.
+ */
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ if (!single_plane_enabled(enabled))
+ return;
+ enabled = ffs(enabled) - 1;
+
+ /* WM1 */
+ if (!ironlake_compute_srwm(dev, 1, enabled,
+ ILK_READ_WM1_LATENCY() * 500,
+ &ironlake_display_srwm_info,
+ &ironlake_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM1_LP_ILK,
+ WM1_LP_SR_EN |
+ (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM2 */
+ if (!ironlake_compute_srwm(dev, 2, enabled,
+ ILK_READ_WM2_LATENCY() * 500,
+ &ironlake_display_srwm_info,
+ &ironlake_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM2_LP_ILK,
+ WM2_LP_EN |
+ (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /*
+ * WM3 is unsupported on ILK, probably because we don't have latency
+ * data for that power state
+ */
+}
+
+static void sandybridge_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
+ u32 val;
+ int fbc_wm, plane_wm, cursor_wm;
+ unsigned int enabled;
+
+ enabled = 0;
+ if (g4x_compute_wm0(dev, 0,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ val = I915_READ(WM0_PIPEA_ILK);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEA_ILK, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+ DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
+ " plane %d, " "cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 1;
+ }
+
+ if (g4x_compute_wm0(dev, 1,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ val = I915_READ(WM0_PIPEB_ILK);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEB_ILK, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+ DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
+ " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 2;
+ }
+
+ if ((dev_priv->num_pipe == 3) &&
+ g4x_compute_wm0(dev, 2,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ val = I915_READ(WM0_PIPEC_IVB);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEC_IVB, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+ DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
+ " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+		enabled |= 1 << 2;
+ }
+
+ /*
+ * Calculate and update the self-refresh watermark only when one
+ * display plane is used.
+ *
+	 * SNB supports 3 levels of watermarks.
+	 *
+	 * WM1/WM2/WM3 watermarks have to be enabled in ascending order
+	 * and disabled in descending order.
+	 */
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ if (!single_plane_enabled(enabled) ||
+ dev_priv->sprite_scaling_enabled)
+ return;
+ enabled = ffs(enabled) - 1;
+
+ /* WM1 */
+ if (!ironlake_compute_srwm(dev, 1, enabled,
+ SNB_READ_WM1_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM1_LP_ILK,
+ WM1_LP_SR_EN |
+ (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM2 */
+ if (!ironlake_compute_srwm(dev, 2, enabled,
+ SNB_READ_WM2_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM2_LP_ILK,
+ WM2_LP_EN |
+ (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM3 */
+ if (!ironlake_compute_srwm(dev, 3, enabled,
+ SNB_READ_WM3_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM3_LP_ILK,
+ WM3_LP_EN |
+ (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+}
+
+static void
+haswell_update_linetime_wm(struct drm_device *dev, int pipe,
+ struct drm_display_mode *mode)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 temp;
+
+ temp = I915_READ(PIPE_WM_LINETIME(pipe));
+ temp &= ~PIPE_WM_LINETIME_MASK;
+
+	/* The WM is computed based on how long it takes to fill a single
+	 * row at the given clock rate, multiplied by 8.
+	 */
+ temp |= PIPE_WM_LINETIME_TIME(
+ ((mode->crtc_hdisplay * 1000) / mode->clock) * 8);
+
+ /* IPS watermarks are only used by pipe A, and are ignored by
+ * pipes B and C. They are calculated similarly to the common
+ * linetime values, except that we are using CD clock frequency
+ * in MHz instead of pixel rate for the division.
+ *
+ * This is a placeholder for the IPS watermark calculation code.
+ */
+
+ I915_WRITE(PIPE_WM_LINETIME(pipe), temp);
+}
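+
+/*
+ * Illustrative linetime value for the formula above: a 1920-wide mode
+ * at a 148500 kHz clock gives ((1920 * 1000) / 148500) * 8 = 12 * 8 =
+ * 96, i.e. the line time expressed in 1/8 microsecond units (integer
+ * division truncates 12.9 to 12 before the multiply).
+ */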
+
+static bool
+sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
+ uint32_t sprite_width, int pixel_size,
+ const struct intel_watermark_params *display,
+ int display_latency_ns, int *sprite_wm)
+{
+ struct drm_crtc *crtc;
+ int clock;
+ int entries, tlb_miss;
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ if (crtc->fb == NULL || !crtc->enabled) {
+ *sprite_wm = display->guard_size;
+ return false;
+ }
+
+ clock = crtc->mode.clock;
+
+ /* Use the small buffer method to calculate the sprite watermark */
+ entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
+ tlb_miss = display->fifo_size*display->cacheline_size -
+ sprite_width * 8;
+ if (tlb_miss > 0)
+ entries += tlb_miss;
+ entries = DIV_ROUND_UP(entries, display->cacheline_size);
+ *sprite_wm = entries + display->guard_size;
+ if (*sprite_wm > (int)display->max_wm)
+ *sprite_wm = display->max_wm;
+
+ return true;
+}
+
+static bool
+sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
+ uint32_t sprite_width, int pixel_size,
+ const struct intel_watermark_params *display,
+ int latency_ns, int *sprite_wm)
+{
+ struct drm_crtc *crtc;
+ unsigned long line_time_us;
+ int clock;
+ int line_count, line_size;
+ int small, large;
+ int entries;
+
+ if (!latency_ns) {
+ *sprite_wm = 0;
+ return false;
+ }
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ clock = crtc->mode.clock;
+ if (!clock) {
+ *sprite_wm = 0;
+ return false;
+ }
+
+ line_time_us = (sprite_width * 1000) / clock;
+ if (!line_time_us) {
+ *sprite_wm = 0;
+ return false;
+ }
+
+ line_count = (latency_ns / line_time_us + 1000) / 1000;
+ line_size = sprite_width * pixel_size;
+
+ /* Use the minimum of the small and large buffer method for primary */
+ small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+ large = line_count * line_size;
+
+ entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
+ *sprite_wm = entries + display->guard_size;
+
+ return *sprite_wm > 0x3ff ? false : true;
+}
+
+static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
+ uint32_t sprite_width, int pixel_size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
+ u32 val;
+ int sprite_wm, reg;
+ int ret;
+
+ switch (pipe) {
+ case 0:
+ reg = WM0_PIPEA_ILK;
+ break;
+ case 1:
+ reg = WM0_PIPEB_ILK;
+ break;
+ case 2:
+ reg = WM0_PIPEC_IVB;
+ break;
+ default:
+ return; /* bad pipe */
+ }
+
+ ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
+ &sandybridge_display_wm_info,
+ latency, &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
+ pipe);
+ return;
+ }
+
+ val = I915_READ(reg);
+ val &= ~WM0_PIPE_SPRITE_MASK;
+ I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
+ DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
+
+
+ ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+ pixel_size,
+ &sandybridge_display_srwm_info,
+ SNB_READ_WM1_LATENCY() * 500,
+ &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
+ pipe);
+ return;
+ }
+ I915_WRITE(WM1S_LP_ILK, sprite_wm);
+
+ /* Only IVB has two more LP watermarks for sprite */
+ if (!IS_IVYBRIDGE(dev))
+ return;
+
+ ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+ pixel_size,
+ &sandybridge_display_srwm_info,
+ SNB_READ_WM2_LATENCY() * 500,
+ &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
+ pipe);
+ return;
+ }
+ I915_WRITE(WM2S_LP_IVB, sprite_wm);
+
+ ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+ pixel_size,
+ &sandybridge_display_srwm_info,
+ SNB_READ_WM3_LATENCY() * 500,
+ &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
+ pipe);
+ return;
+ }
+ I915_WRITE(WM3S_LP_IVB, sprite_wm);
+}
+
+/**
+ * intel_update_watermarks - update FIFO watermark values based on current modes
+ *
+ * Calculate watermark values for the various WM regs based on current mode
+ * and plane configuration.
+ *
+ * There are several cases to deal with here:
+ * - normal (i.e. non-self-refresh)
+ * - self-refresh (SR) mode
+ * - lines are large relative to FIFO size (buffer can hold up to 2)
+ * - lines are small relative to FIFO size (buffer can hold more than 2
+ * lines), so need to account for TLB latency
+ *
+ * The normal calculation is:
+ * watermark = dotclock * bytes per pixel * latency
+ * where latency is platform & configuration dependent (we assume pessimal
+ * values here).
+ *
+ * The SR calculation is:
+ * watermark = (trunc(latency/line time)+1) * surface width *
+ * bytes per pixel
+ * where
+ * line time = htotal / dotclock
+ * surface width = hdisplay for normal plane and 64 for cursor
+ * and latency is assumed to be high, as above.
+ *
+ * The final value programmed to the register should always be rounded up,
+ * and include an extra 2 entries to account for clock crossings.
+ *
+ * We don't use the sprite, so we can ignore that. And on Crestline we have
+ * to set the non-SR watermarks to 8.
+ */
+void intel_update_watermarks(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->display.update_wm)
+ dev_priv->display.update_wm(dev);
+}
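+
+/*
+ * Worked example of the SR formula in the comment above (illustrative
+ * numbers): htotal = 2200 at a 148500 kHz dotclock gives a line time of
+ * ~14.8 us, so a 12000 ns SR latency spans trunc(12.0 / 14.8) + 1 = 1
+ * line, and a 1920-wide 4 bpp plane then needs 1 * 1920 * 4 = 7680
+ * bytes of self-refresh watermark before rounding and guard entries.
+ */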
+
+void intel_update_linetime_watermarks(struct drm_device *dev,
+ int pipe, struct drm_display_mode *mode)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->display.update_linetime_wm)
+ dev_priv->display.update_linetime_wm(dev, pipe, mode);
+}
+
+void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
+ uint32_t sprite_width, int pixel_size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->display.update_sprite_wm)
+ dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
+ pixel_size);
+}
+
+static struct drm_i915_gem_object *
+intel_alloc_context_page(struct drm_device *dev)
+{
+ struct drm_i915_gem_object *ctx;
+ int ret;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ ctx = i915_gem_alloc_object(dev, 4096);
+ if (!ctx) {
+ DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
+ return NULL;
+ }
+
+ ret = i915_gem_object_pin(ctx, 4096, true);
+ if (ret) {
+ DRM_ERROR("failed to pin power context: %d\n", ret);
+ goto err_unref;
+ }
+
+ ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
+ if (ret) {
+ DRM_ERROR("failed to set-domain on power context: %d\n", ret);
+ goto err_unpin;
+ }
+
+ return ctx;
+
+err_unpin:
+	i915_gem_object_unpin(ctx);
+err_unref:
+	drm_gem_object_unreference(&ctx->base);
+	/* The caller holds struct_mutex (asserted above), so don't drop it
+	 * here: unlocking on error would unbalance the caller's locking.
+	 */
+	return NULL;
+}
+
+bool ironlake_set_drps(struct drm_device *dev, u8 val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u16 rgvswctl;
+
+ rgvswctl = I915_READ16(MEMSWCTL);
+ if (rgvswctl & MEMCTL_CMD_STS) {
+ DRM_DEBUG("gpu busy, RCS change rejected\n");
+ return false; /* still busy with another command */
+ }
+
+ rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
+ (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
+ I915_WRITE16(MEMSWCTL, rgvswctl);
+ POSTING_READ16(MEMSWCTL);
+
+ rgvswctl |= MEMCTL_CMD_STS;
+ I915_WRITE16(MEMSWCTL, rgvswctl);
+
+ return true;
+}
+
+void ironlake_enable_drps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 rgvmodectl = I915_READ(MEMMODECTL);
+ u8 fmax, fmin, fstart, vstart;
+
+ /* Enable temp reporting */
+ I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
+ I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
+
+ /* 100ms RC evaluation intervals */
+ I915_WRITE(RCUPEI, 100000);
+ I915_WRITE(RCDNEI, 100000);
+
+ /* Set max/min thresholds to 90ms and 80ms respectively */
+ I915_WRITE(RCBMAXAVG, 90000);
+ I915_WRITE(RCBMINAVG, 80000);
+
+ I915_WRITE(MEMIHYST, 1);
+
+ /* Set up min, max, and cur for interrupt handling */
+ fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
+ fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
+ fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
+ MEMMODE_FSTART_SHIFT;
+
+ vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
+ PXVFREQ_PX_SHIFT;
+
+ dev_priv->fmax = fmax; /* IPS callback will increase this */
+ dev_priv->fstart = fstart;
+
+ dev_priv->max_delay = fstart;
+ dev_priv->min_delay = fmin;
+ dev_priv->cur_delay = fstart;
+
+ DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
+ fmax, fmin, fstart);
+
+ I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
+
+ /*
+ * Interrupts will be enabled in ironlake_irq_postinstall
+ */
+
+ I915_WRITE(VIDSTART, vstart);
+ POSTING_READ(VIDSTART);
+
+ rgvmodectl |= MEMMODE_SWMODE_EN;
+ I915_WRITE(MEMMODECTL, rgvmodectl);
+
+ if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
+ DRM_ERROR("stuck trying to change perf mode\n");
+ msleep(1);
+
+ ironlake_set_drps(dev, fstart);
+
+ dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
+ I915_READ(0x112e0);
+ dev_priv->last_time1 = jiffies_to_msecs(jiffies);
+ dev_priv->last_count2 = I915_READ(0x112f4);
+ getrawmonotonic(&dev_priv->last_time2);
+}
+
+void ironlake_disable_drps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u16 rgvswctl = I915_READ16(MEMSWCTL);
+
+ /* Ack interrupts, disable EFC interrupt */
+ I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
+ I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
+ I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
+ I915_WRITE(DEIIR, DE_PCU_EVENT);
+ I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
+
+ /* Go back to the starting frequency */
+ ironlake_set_drps(dev, dev_priv->fstart);
+ msleep(1);
+ rgvswctl |= MEMCTL_CMD_STS;
+ I915_WRITE(MEMSWCTL, rgvswctl);
+ msleep(1);
+}
+
+void gen6_set_rps(struct drm_device *dev, u8 val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 swreq;
+
+ swreq = (val & 0x3ff) << 25;
+ I915_WRITE(GEN6_RPNSWREQ, swreq);
+}
+
+void gen6_disable_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
+ I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+ I915_WRITE(GEN6_PMIER, 0);
+	/* Completely masking the PM interrupts here doesn't race with the rps
+	 * work item unmasking them again, because that uses a different
+	 * register (PMIMR) to mask PM interrupts. The only risk is leaving
+	 * stale bits in PMIIR and PMIMR, which gen6_enable_rps will clean up. */
+
+ spin_lock_irq(&dev_priv->rps_lock);
+ dev_priv->pm_iir = 0;
+ spin_unlock_irq(&dev_priv->rps_lock);
+
+ I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
+}
+
+int intel_enable_rc6(const struct drm_device *dev)
+{
+ /*
+ * Respect the kernel parameter if it is set
+ */
+ if (i915_enable_rc6 >= 0)
+ return i915_enable_rc6;
+
+ /*
+ * Disable RC6 on Ironlake
+ */
+ if (INTEL_INFO(dev)->gen == 5)
+ return 0;
+
+ /* Sorry Haswell, no RC6 for you for now. */
+ if (IS_HASWELL(dev))
+ return 0;
+
+ /*
+	 * On Sandybridge, enable plain RC6 only; deep RC6 stays disabled.
+ */
+ if (INTEL_INFO(dev)->gen == 6) {
+ DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
+ return INTEL_RC6_ENABLE;
+ }
+ DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
+ return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
+}
+
+void gen6_enable_rps(struct drm_i915_private *dev_priv)
+{
+ struct intel_ring_buffer *ring;
+ u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+ u32 pcu_mbox, rc6_mask = 0;
+ u32 gtfifodbg;
+ int cur_freq, min_freq, max_freq;
+ int rc6_mode;
+ int i;
+
+ /* Here begins a magic sequence of register writes to enable
+ * auto-downclocking.
+ *
+ * Perhaps there might be some value in exposing these to
+ * userspace...
+ */
+ I915_WRITE(GEN6_RC_STATE, 0);
+ mutex_lock(&dev_priv->dev->struct_mutex);
+
+	/* Clear the GT FIFO debug register now so that stale errors aren't
+	 * confused with new ones */
+ if ((gtfifodbg = I915_READ(GTFIFODBG))) {
+ DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
+ I915_WRITE(GTFIFODBG, gtfifodbg);
+ }
+
+ gen6_gt_force_wake_get(dev_priv);
+
+ /* disable the counters and set deterministic thresholds */
+ I915_WRITE(GEN6_RC_CONTROL, 0);
+
+ I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
+ I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
+ I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
+ I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
+ I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
+
+ for_each_ring(ring, dev_priv, i)
+ I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+
+ I915_WRITE(GEN6_RC_SLEEP, 0);
+ I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
+ I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
+ I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
+ I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
+
+ rc6_mode = intel_enable_rc6(dev_priv->dev);
+ if (rc6_mode & INTEL_RC6_ENABLE)
+ rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
+
+ if (rc6_mode & INTEL_RC6p_ENABLE)
+ rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
+
+ if (rc6_mode & INTEL_RC6pp_ENABLE)
+ rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
+
+ DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
+ (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
+ (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
+ (rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");
+
+ I915_WRITE(GEN6_RC_CONTROL,
+ rc6_mask |
+ GEN6_RC_CTL_EI_MODE(1) |
+ GEN6_RC_CTL_HW_ENABLE);
+
+ I915_WRITE(GEN6_RPNSWREQ,
+ GEN6_FREQUENCY(10) |
+ GEN6_OFFSET(0) |
+ GEN6_AGGRESSIVE_TURBO);
+ I915_WRITE(GEN6_RC_VIDEO_FREQ,
+ GEN6_FREQUENCY(12));
+
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+ 18 << 24 |
+ 6 << 16);
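+	/* The interrupt limits pack up/down thresholds as delay values:
+	 * bits 29:24 hold the upper limit (18, i.e. 900 MHz at 50 MHz per
+	 * unit) and bits 21:16 the lower limit (6, i.e. 300 MHz);
+	 * gen6_sanitize_pm later rewrites these from max_delay/min_delay
+	 * (illustrative reading of the field layout). */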
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
+ I915_WRITE(GEN6_RP_UP_EI, 100000);
+ I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
+ I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+ I915_WRITE(GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_MEDIA_HW_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_CONT);
+
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500))
+ DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
+
+ I915_WRITE(GEN6_PCODE_DATA, 0);
+ I915_WRITE(GEN6_PCODE_MAILBOX,
+ GEN6_PCODE_READY |
+ GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500))
+ DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
+
+ min_freq = (rp_state_cap & 0xff0000) >> 16;
+ max_freq = rp_state_cap & 0xff;
+ cur_freq = (gt_perf_status & 0xff00) >> 8;
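+	/* Example (illustrative): rp_state_cap = 0x00060012 would decode to
+	 * min_freq = 6 and max_freq = 0x12 = 18, i.e. 300 MHz and 900 MHz
+	 * at 50 MHz per unit. */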
+
+ /* Check for overclock support */
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500))
+ DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
+ I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
+ pcu_mbox = I915_READ(GEN6_PCODE_DATA);
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500))
+ DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
+ if (pcu_mbox & (1<<31)) { /* OC supported */
+ max_freq = pcu_mbox & 0xff;
+		DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", max_freq * 50);
+ }
+
+	/* In units of 50MHz (cf. the 750MHz/15 cutoff in gen6_update_ring_freq) */
+ dev_priv->max_delay = max_freq;
+ dev_priv->min_delay = min_freq;
+ dev_priv->cur_delay = cur_freq;
+
+ /* requires MSI enabled */
+ I915_WRITE(GEN6_PMIER,
+ GEN6_PM_MBOX_EVENT |
+ GEN6_PM_THERMAL_EVENT |
+ GEN6_PM_RP_DOWN_TIMEOUT |
+ GEN6_PM_RP_UP_THRESHOLD |
+ GEN6_PM_RP_DOWN_THRESHOLD |
+ GEN6_PM_RP_UP_EI_EXPIRED |
+ GEN6_PM_RP_DOWN_EI_EXPIRED);
+ spin_lock_irq(&dev_priv->rps_lock);
+ WARN_ON(dev_priv->pm_iir != 0);
+ I915_WRITE(GEN6_PMIMR, 0);
+ spin_unlock_irq(&dev_priv->rps_lock);
+ /* enable all PM interrupts */
+ I915_WRITE(GEN6_PMINTRMSK, 0);
+
+ gen6_gt_force_wake_put(dev_priv);
+ mutex_unlock(&dev_priv->dev->struct_mutex);
+}
+
+void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
+{
+ int min_freq = 15;
+ int gpu_freq, ia_freq, max_ia_freq;
+ int scaling_factor = 180;
+
+ max_ia_freq = cpufreq_quick_get_max(0);
+	/*
+	 * Default to measured freq if none found; the PCU will ensure we
+	 * don't go over.
+	 */
+ if (!max_ia_freq)
+ max_ia_freq = tsc_khz;
+
+ /* Convert from kHz to MHz */
+ max_ia_freq /= 1000;
+
+ mutex_lock(&dev_priv->dev->struct_mutex);
+
+ /*
+ * For each potential GPU frequency, load a ring frequency we'd like
+ * to use for memory access. We do this by specifying the IA frequency
+ * the PCU should use as a reference to determine the ring frequency.
+ */
+ for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
+ gpu_freq--) {
+ int diff = dev_priv->max_delay - gpu_freq;
+
+ /*
+ * For GPU frequencies less than 750MHz, just use the lowest
+ * ring freq.
+ */
+ if (gpu_freq < min_freq)
+ ia_freq = 800;
+ else
+ ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
+ ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
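+		/* Worked example (illustrative numbers): with
+		 * max_ia_freq = 3200 MHz and a GPU frequency five steps below
+		 * max (diff = 5), ia_freq = 3200 - (5 * 180) / 2 = 2750 MHz,
+		 * which rounds to 28 in the 100 MHz units the division above
+		 * converts to for the mailbox write below. */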
+
+ I915_WRITE(GEN6_PCODE_DATA,
+ (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
+ gpu_freq);
+ I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
+ GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
+ GEN6_PCODE_READY) == 0, 10)) {
+ DRM_ERROR("pcode write of freq table timed out\n");
+ continue;
+ }
+ }
+
+ mutex_unlock(&dev_priv->dev->struct_mutex);
+}
+
+static void ironlake_teardown_rc6(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->renderctx) {
+ i915_gem_object_unpin(dev_priv->renderctx);
+ drm_gem_object_unreference(&dev_priv->renderctx->base);
+ dev_priv->renderctx = NULL;
+ }
+
+ if (dev_priv->pwrctx) {
+ i915_gem_object_unpin(dev_priv->pwrctx);
+ drm_gem_object_unreference(&dev_priv->pwrctx->base);
+ dev_priv->pwrctx = NULL;
+ }
+}
+
+void ironlake_disable_rc6(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (I915_READ(PWRCTXA)) {
+ /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
+ I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
+ wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
+ 50);
+
+ I915_WRITE(PWRCTXA, 0);
+ POSTING_READ(PWRCTXA);
+
+ I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+ POSTING_READ(RSTDBYCTL);
+ }
+
+ ironlake_teardown_rc6(dev);
+}
+
+static int ironlake_setup_rc6(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->renderctx == NULL)
+ dev_priv->renderctx = intel_alloc_context_page(dev);
+ if (!dev_priv->renderctx)
+ return -ENOMEM;
+
+ if (dev_priv->pwrctx == NULL)
+ dev_priv->pwrctx = intel_alloc_context_page(dev);
+ if (!dev_priv->pwrctx) {
+ ironlake_teardown_rc6(dev);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void ironlake_enable_rc6(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ int ret;
+
+ /* rc6 disabled by default due to repeated reports of hanging during
+ * boot and resume.
+ */
+ if (!intel_enable_rc6(dev))
+ return;
+
+ mutex_lock(&dev->struct_mutex);
+ ret = ironlake_setup_rc6(dev);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return;
+ }
+
+ /*
+ * GPU can automatically power down the render unit if given a page
+ * to save state.
+ */
+ ret = intel_ring_begin(ring, 6);
+ if (ret) {
+ ironlake_teardown_rc6(dev);
+ mutex_unlock(&dev->struct_mutex);
+ return;
+ }
+
+ intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
+ intel_ring_emit(ring, MI_SET_CONTEXT);
+ intel_ring_emit(ring, dev_priv->renderctx->gtt_offset |
+ MI_MM_SPACE_GTT |
+ MI_SAVE_EXT_STATE_EN |
+ MI_RESTORE_EXT_STATE_EN |
+ MI_RESTORE_INHIBIT);
+ intel_ring_emit(ring, MI_SUSPEND_FLUSH);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_FLUSH);
+ intel_ring_advance(ring);
+
+	/*
+	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
+	 * does an implicit flush; combined with the MI_FLUSH above, it should
+	 * be safe to assume that renderctx is valid.
+	 */
+ ret = intel_wait_ring_idle(ring);
+ if (ret) {
+ DRM_ERROR("failed to enable ironlake power power savings\n");
+ ironlake_teardown_rc6(dev);
+ mutex_unlock(&dev->struct_mutex);
+ return;
+ }
+
+ I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
+ I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+ mutex_unlock(&dev->struct_mutex);
+}
+
+static unsigned long intel_pxfreq(u32 vidfreq)
+{
+ unsigned long freq;
+ int div = (vidfreq & 0x3f0000) >> 16;
+ int post = (vidfreq & 0x3000) >> 12;
+ int pre = (vidfreq & 0x7);
+
+ if (!pre)
+ return 0;
+
+ freq = ((div * 133333) / ((1<<post) * pre));
+
+ return freq;
+}
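+
+/*
+ * Example (illustrative): a vidfreq value with div = 3, post = 1 and
+ * pre = 1 gives freq = (3 * 133333) / ((1 << 1) * 1) = 199999, i.e.
+ * roughly 200 MHz assuming the 133333 constant is a 133.333 MHz
+ * reference expressed in kHz.
+ */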
+
+static const struct cparams {
+ u16 i;
+ u16 t;
+ u16 m;
+ u16 c;
+} cparams[] = {
+ { 1, 1333, 301, 28664 },
+ { 1, 1066, 294, 24460 },
+ { 1, 800, 294, 25192 },
+ { 0, 1333, 276, 27605 },
+ { 0, 1066, 276, 27605 },
+ { 0, 800, 231, 23784 },
+};
+
+unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+{
+ u64 total_count, diff, ret;
+ u32 count1, count2, count3, m = 0, c = 0;
+ unsigned long now = jiffies_to_msecs(jiffies), diff1;
+ int i;
+
+ diff1 = now - dev_priv->last_time1;
+
+ /* Prevent division-by-zero if we are asking too fast.
+ * Also, we don't get interesting results if we are polling
+ * faster than once in 10ms, so just return the saved value
+ * in such cases.
+ */
+ if (diff1 <= 10)
+ return dev_priv->chipset_power;
+
+ count1 = I915_READ(DMIEC);
+ count2 = I915_READ(DDREC);
+ count3 = I915_READ(CSIEC);
+
+ total_count = count1 + count2 + count3;
+
+ /* FIXME: handle per-counter overflow */
+ if (total_count < dev_priv->last_count1) {
+ diff = ~0UL - dev_priv->last_count1;
+ diff += total_count;
+ } else {
+ diff = total_count - dev_priv->last_count1;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cparams); i++) {
+ if (cparams[i].i == dev_priv->c_m &&
+ cparams[i].t == dev_priv->r_t) {
+ m = cparams[i].m;
+ c = cparams[i].c;
+ break;
+ }
+ }
+
+ diff = div_u64(diff, diff1);
+ ret = ((m * diff) + c);
+ ret = div_u64(ret, 10);
+
+ dev_priv->last_count1 = total_count;
+ dev_priv->last_time1 = now;
+
+ dev_priv->chipset_power = ret;
+
+ return ret;
+}
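+
+/*
+ * Worked example (illustrative numbers): with m = 294, c = 24460 (the
+ * i = 1, t = 1066 row above) and the energy counters advancing by 10000
+ * over a 100 ms window, diff = 100 counts/ms and the result is
+ * (294 * 100 + 24460) / 10 = 5386.
+ */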
+
+unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
+{
+ unsigned long m, x, b;
+ u32 tsfs;
+
+ tsfs = I915_READ(TSFS);
+
+ m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
+ x = I915_READ8(TR1);
+
+ b = tsfs & TSFS_INTR_MASK;
+
+ return ((m * x) / 127) - b;
+}
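+
+/*
+ * Example (illustrative numbers): a TSFS slope m = 100, a TR1 reading
+ * x = 80 and an intercept b = 10 yield (100 * 80) / 127 - 10 = 52,
+ * in whatever units the thermal sensor reports.
+ */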
+
+static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
+{
+ static const struct v_table {
+ u16 vd; /* in .1 mil */
+ u16 vm; /* in .1 mil */
+ } v_table[] = {
+ { 0, 0, },
+ { 375, 0, },
+ { 500, 0, },
+ { 625, 0, },
+ { 750, 0, },
+ { 875, 0, },
+ { 1000, 0, },
+ { 1125, 0, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4250, 3125, },
+ { 4375, 3250, },
+ { 4500, 3375, },
+ { 4625, 3500, },
+ { 4750, 3625, },
+ { 4875, 3750, },
+ { 5000, 3875, },
+ { 5125, 4000, },
+ { 5250, 4125, },
+ { 5375, 4250, },
+ { 5500, 4375, },
+ { 5625, 4500, },
+ { 5750, 4625, },
+ { 5875, 4750, },
+ { 6000, 4875, },
+ { 6125, 5000, },
+ { 6250, 5125, },
+ { 6375, 5250, },
+ { 6500, 5375, },
+ { 6625, 5500, },
+ { 6750, 5625, },
+ { 6875, 5750, },
+ { 7000, 5875, },
+ { 7125, 6000, },
+ { 7250, 6125, },
+ { 7375, 6250, },
+ { 7500, 6375, },
+ { 7625, 6500, },
+ { 7750, 6625, },
+ { 7875, 6750, },
+ { 8000, 6875, },
+ { 8125, 7000, },
+ { 8250, 7125, },
+ { 8375, 7250, },
+ { 8500, 7375, },
+ { 8625, 7500, },
+ { 8750, 7625, },
+ { 8875, 7750, },
+ { 9000, 7875, },
+ { 9125, 8000, },
+ { 9250, 8125, },
+ { 9375, 8250, },
+ { 9500, 8375, },
+ { 9625, 8500, },
+ { 9750, 8625, },
+ { 9875, 8750, },
+ { 10000, 8875, },
+ { 10125, 9000, },
+ { 10250, 9125, },
+ { 10375, 9250, },
+ { 10500, 9375, },
+ { 10625, 9500, },
+ { 10750, 9625, },
+ { 10875, 9750, },
+ { 11000, 9875, },
+ { 11125, 10000, },
+ { 11250, 10125, },
+ { 11375, 10250, },
+ { 11500, 10375, },
+ { 11625, 10500, },
+ { 11750, 10625, },
+ { 11875, 10750, },
+ { 12000, 10875, },
+ { 12125, 11000, },
+ { 12250, 11125, },
+ { 12375, 11250, },
+ { 12500, 11375, },
+ { 12625, 11500, },
+ { 12750, 11625, },
+ { 12875, 11750, },
+ { 13000, 11875, },
+ { 13125, 12000, },
+ { 13250, 12125, },
+ { 13375, 12250, },
+ { 13500, 12375, },
+ { 13625, 12500, },
+ { 13750, 12625, },
+ { 13875, 12750, },
+ { 14000, 12875, },
+ { 14125, 13000, },
+ { 14250, 13125, },
+ { 14375, 13250, },
+ { 14500, 13375, },
+ { 14625, 13500, },
+ { 14750, 13625, },
+ { 14875, 13750, },
+ { 15000, 13875, },
+ { 15125, 14000, },
+ { 15250, 14125, },
+ { 15375, 14250, },
+ { 15500, 14375, },
+ { 15625, 14500, },
+ { 15750, 14625, },
+ { 15875, 14750, },
+ { 16000, 14875, },
+ { 16125, 15000, },
+ };
+ if (dev_priv->info->is_mobile)
+ return v_table[pxvid].vm;
+ else
+ return v_table[pxvid].vd;
+}
+
+void i915_update_gfx_val(struct drm_i915_private *dev_priv)
+{
+ struct timespec now, diff1;
+ u64 diff;
+ unsigned long diffms;
+ u32 count;
+
+ if (dev_priv->info->gen != 5)
+ return;
+
+ getrawmonotonic(&now);
+ diff1 = timespec_sub(now, dev_priv->last_time2);
+
+ /* Don't divide by 0 */
+ diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
+ if (!diffms)
+ return;
+
+ count = I915_READ(GFXEC);
+
+ if (count < dev_priv->last_count2) {
+ diff = ~0UL - dev_priv->last_count2;
+ diff += count;
+ } else {
+ diff = count - dev_priv->last_count2;
+ }
+
+ dev_priv->last_count2 = count;
+ dev_priv->last_time2 = now;
+
+ /* More magic constants... */
+ diff = diff * 1181;
+ diff = div_u64(diff, diffms * 10);
+ dev_priv->gfx_power = diff;
+}
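+
+/*
+ * Worked example (illustrative numbers): if GFXEC advanced by 5000
+ * counts in 50 ms, gfx_power = (5000 * 1181) / (50 * 10) = 11810, in
+ * the same implied units as chipset_power above.
+ */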
+
+unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
+{
+ unsigned long t, corr, state1, corr2, state2;
+ u32 pxvid, ext_v;
+
+ pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
+ pxvid = (pxvid >> 24) & 0x7f;
+ ext_v = pvid_to_extvid(dev_priv, pxvid);
+
+ state1 = ext_v;
+
+ t = i915_mch_val(dev_priv);
+
+ /* Revel in the empirically derived constants */
+
+ /* Correction factor in 1/100000 units */
+ if (t > 80)
+ corr = ((t * 2349) + 135940);
+ else if (t >= 50)
+ corr = ((t * 964) + 29317);
+ else /* < 50 */
+ corr = ((t * 301) + 1004);
+
+ corr = corr * ((150142 * state1) / 10000 - 78642);
+ corr /= 100000;
+ corr2 = (corr * dev_priv->corr);
+
+ state2 = (corr2 * state1) / 10000;
+ state2 /= 100; /* convert to mW */
+
+ i915_update_gfx_val(dev_priv);
+
+ return dev_priv->gfx_power + state2;
+}
+
+/* Global for IPS driver to get at the current i915 device */
+static struct drm_i915_private *i915_mch_dev;
+/*
+ * Lock protecting IPS related data structures
+ * - i915_mch_dev
+ * - dev_priv->max_delay
+ * - dev_priv->min_delay
+ * - dev_priv->fmax
+ * - dev_priv->gpu_busy
+ */
+static DEFINE_SPINLOCK(mchdev_lock);
+
+/**
+ * i915_read_mch_val - return value for IPS use
+ *
+ * Calculate and return a value for the IPS driver to use when deciding whether
+ * we have thermal and power headroom to increase CPU or GPU power budget.
+ */
+unsigned long i915_read_mch_val(void)
+{
+ struct drm_i915_private *dev_priv;
+ unsigned long chipset_val, graphics_val, ret = 0;
+
+ spin_lock(&mchdev_lock);
+ if (!i915_mch_dev)
+ goto out_unlock;
+ dev_priv = i915_mch_dev;
+
+ chipset_val = i915_chipset_val(dev_priv);
+ graphics_val = i915_gfx_val(dev_priv);
+
+ ret = chipset_val + graphics_val;
+
+out_unlock:
+ spin_unlock(&mchdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_read_mch_val);
+
+/**
+ * i915_gpu_raise - raise GPU frequency limit
+ *
+ * Raise the limit; IPS indicates we have thermal headroom.
+ */
+bool i915_gpu_raise(void)
+{
+ struct drm_i915_private *dev_priv;
+ bool ret = true;
+
+ spin_lock(&mchdev_lock);
+ if (!i915_mch_dev) {
+ ret = false;
+ goto out_unlock;
+ }
+ dev_priv = i915_mch_dev;
+
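+	/* A smaller delay value means a higher frequency, hence the decrement. */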
+ if (dev_priv->max_delay > dev_priv->fmax)
+ dev_priv->max_delay--;
+
+out_unlock:
+ spin_unlock(&mchdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_raise);
+
+/**
+ * i915_gpu_lower - lower GPU frequency limit
+ *
+ * IPS indicates we're close to a thermal limit, so throttle back the GPU
+ * frequency maximum.
+ */
+bool i915_gpu_lower(void)
+{
+ struct drm_i915_private *dev_priv;
+ bool ret = true;
+
+ spin_lock(&mchdev_lock);
+ if (!i915_mch_dev) {
+ ret = false;
+ goto out_unlock;
+ }
+ dev_priv = i915_mch_dev;
+
+ if (dev_priv->max_delay < dev_priv->min_delay)
+ dev_priv->max_delay++;
+
+out_unlock:
+ spin_unlock(&mchdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_lower);
+
+/**
+ * i915_gpu_busy - indicate GPU busyness to IPS
+ *
+ * Tell the IPS driver whether or not the GPU is busy.
+ */
+bool i915_gpu_busy(void)
+{
+ struct drm_i915_private *dev_priv;
+ bool ret = false;
+
+ spin_lock(&mchdev_lock);
+ if (!i915_mch_dev)
+ goto out_unlock;
+ dev_priv = i915_mch_dev;
+
+ ret = dev_priv->busy;
+
+out_unlock:
+ spin_unlock(&mchdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_busy);
+
+/**
+ * i915_gpu_turbo_disable - disable graphics turbo
+ *
+ * Disable graphics turbo by resetting the max frequency and setting the
+ * current frequency to the default.
+ */
+bool i915_gpu_turbo_disable(void)
+{
+ struct drm_i915_private *dev_priv;
+ bool ret = true;
+
+ spin_lock(&mchdev_lock);
+ if (!i915_mch_dev) {
+ ret = false;
+ goto out_unlock;
+ }
+ dev_priv = i915_mch_dev;
+
+ dev_priv->max_delay = dev_priv->fstart;
+
+ if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
+ ret = false;
+
+out_unlock:
+ spin_unlock(&mchdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
+
+/**
+ * Tells the intel_ips driver that the i915 driver is now loaded, if
+ * IPS got loaded first.
+ *
+ * This awkward dance is so that neither module has to depend on the
+ * other in order for IPS to do the appropriate communication of
+ * GPU turbo limits to i915.
+ */
+static void
+ips_ping_for_i915_load(void)
+{
+ void (*link)(void);
+
+ link = symbol_get(ips_link_to_i915_driver);
+ if (link) {
+ link();
+ symbol_put(ips_link_to_i915_driver);
+ }
+}
+
+void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
+{
+ spin_lock(&mchdev_lock);
+ i915_mch_dev = dev_priv;
+ dev_priv->mchdev_lock = &mchdev_lock;
+ spin_unlock(&mchdev_lock);
+
+ ips_ping_for_i915_load();
+}
+
+void intel_gpu_ips_teardown(void)
+{
+ spin_lock(&mchdev_lock);
+ i915_mch_dev = NULL;
+ spin_unlock(&mchdev_lock);
+}
+
+void intel_init_emon(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 lcfuse;
+ u8 pxw[16];
+ int i;
+
+	/* Disable PMON so the weights can be programmed */
+ I915_WRITE(ECR, 0);
+ POSTING_READ(ECR);
+
+ /* Program energy weights for various events */
+ I915_WRITE(SDEW, 0x15040d00);
+ I915_WRITE(CSIEW0, 0x007f0000);
+ I915_WRITE(CSIEW1, 0x1e220004);
+ I915_WRITE(CSIEW2, 0x04000004);
+
+ for (i = 0; i < 5; i++)
+ I915_WRITE(PEW + (i * 4), 0);
+ for (i = 0; i < 3; i++)
+ I915_WRITE(DEW + (i * 4), 0);
+
+ /* Program P-state weights to account for frequency power adjustment */
+ for (i = 0; i < 16; i++) {
+ u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
+ unsigned long freq = intel_pxfreq(pxvidfreq);
+ unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
+ PXVFREQ_PX_SHIFT;
+ unsigned long val;
+
+ val = vid * vid;
+ val *= (freq / 1000);
+ val *= 255;
+ val /= (127*127*900);
+ if (val > 0xff)
+ DRM_ERROR("bad pxval: %ld\n", val);
+ pxw[i] = val;
+ }
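+	/* Worked example (illustrative numbers): vid = 64 and
+	 * freq = 200000 (kHz) give val = 64 * 64 * 200 * 255 /
+	 * (127 * 127 * 900) ~= 14, well within the u8 range. */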
+ /* Render standby states get 0 weight */
+ pxw[14] = 0;
+ pxw[15] = 0;
+
+ for (i = 0; i < 4; i++) {
+ u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
+ (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
+ I915_WRITE(PXW + (i * 4), val);
+ }
+
+ /* Adjust magic regs to magic values (more experimental results) */
+ I915_WRITE(OGW0, 0);
+ I915_WRITE(OGW1, 0);
+ I915_WRITE(EG0, 0x00007f00);
+ I915_WRITE(EG1, 0x0000000e);
+ I915_WRITE(EG2, 0x000e0000);
+ I915_WRITE(EG3, 0x68000300);
+ I915_WRITE(EG4, 0x42000000);
+ I915_WRITE(EG5, 0x00140031);
+ I915_WRITE(EG6, 0);
+ I915_WRITE(EG7, 0);
+
+ for (i = 0; i < 8; i++)
+ I915_WRITE(PXWL + (i * 4), 0);
+
+ /* Enable PMON + select events */
+ I915_WRITE(ECR, 0x80000019);
+
+ lcfuse = I915_READ(LCFUSE02);
+
+ dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
+}
+
+static void ironlake_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+
+ /* Required for FBC */
+ dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
+ DPFCRUNIT_CLOCK_GATE_DISABLE |
+ DPFDUNIT_CLOCK_GATE_DISABLE;
+ /* Required for CxSR */
+ dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
+
+ I915_WRITE(PCH_3DCGDIS0,
+ MARIUNIT_CLOCK_GATE_DISABLE |
+ SVSMUNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(PCH_3DCGDIS1,
+ VFMUNIT_CLOCK_GATE_DISABLE);
+
+ I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+
+	/*
+	 * According to the spec, the following bits should be set in
+	 * order to enable memory self-refresh:
+	 * bits 22/21 of 0x42004,
+	 * bit 5 of 0x42020,
+	 * bit 15 of 0x45000.
+	 */
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ (I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_DPARB_GATE | ILK_VSDPFD_FULL));
+ I915_WRITE(ILK_DSPCLK_GATE,
+ (I915_READ(ILK_DSPCLK_GATE) |
+ ILK_DPARB_CLK_GATE));
+ I915_WRITE(DISP_ARB_CTL,
+ (I915_READ(DISP_ARB_CTL) |
+ DISP_FBC_WM_DIS));
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+	/*
+	 * Based on the hardware documentation, the following bits
+	 * should be set unconditionally in order to enable FBC:
+	 * bit 22 of 0x42000,
+	 * bit 22 of 0x42004,
+	 * bits 7/8/9 of 0x42020.
+	 */
+ if (IS_IRONLAKE_M(dev)) {
+ I915_WRITE(ILK_DISPLAY_CHICKEN1,
+ I915_READ(ILK_DISPLAY_CHICKEN1) |
+ ILK_FBCQ_DIS);
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_DPARB_GATE);
+ I915_WRITE(ILK_DSPCLK_GATE,
+ I915_READ(ILK_DSPCLK_GATE) |
+ ILK_DPFC_DIS1 |
+ ILK_DPFC_DIS2 |
+ ILK_CLK_FBC);
+ }
+
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_ELPIN_409_SELECT);
+ I915_WRITE(_3D_CHICKEN2,
+ _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
+ _3D_CHICKEN2_WM_READ_PIPELINED);
+}
+
+static void gen6_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+ uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+
+ I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_ELPIN_409_SELECT);
+
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ I915_WRITE(CACHE_MODE_0,
+ _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
+
+ I915_WRITE(GEN6_UCGCTL1,
+ I915_READ(GEN6_UCGCTL1) |
+ GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
+ GEN6_CSUNIT_CLOCK_GATE_DISABLE);
+
+ /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
+ * gating disable must be set. Failure to set it results in
+ * flickering pixels due to Z write ordering failures after
+ * some amount of runtime in the Mesa "fire" demo, and Unigine
+ * Sanctuary and Tropics, and apparently anything else with
+ * alpha test or pixel discard.
+ *
+ * According to the spec, bit 11 (RCCUNIT) must also be set,
+ * but we didn't debug actual testcases to find it out.
+ */
+ I915_WRITE(GEN6_UCGCTL2,
+ GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
+ GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
+
+ /* Bspec says we need to always set all mask bits. */
+ I915_WRITE(_3D_CHICKEN, (0xFFFF << 16) |
+ _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL);
+
+	/*
+	 * According to the spec, the following bits should be
+	 * set in order to enable memory self-refresh and fbc:
+	 * bits 21 and 22 of 0x42000,
+	 * bits 21 and 22 of 0x42004,
+	 * bits 5 and 7 of 0x42020,
+	 * bit 14 of 0x70180,
+	 * bit 14 of 0x71180.
+	 */
+ I915_WRITE(ILK_DISPLAY_CHICKEN1,
+ I915_READ(ILK_DISPLAY_CHICKEN1) |
+ ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_DPARB_GATE | ILK_VSDPFD_FULL);
+ I915_WRITE(ILK_DSPCLK_GATE,
+ I915_READ(ILK_DSPCLK_GATE) |
+ ILK_DPARB_CLK_GATE |
+ ILK_DPFD_CLK_GATE);
+
+ for_each_pipe(pipe) {
+ I915_WRITE(DSPCNTR(pipe),
+ I915_READ(DSPCNTR(pipe)) |
+ DISPPLANE_TRICKLE_FEED_DISABLE);
+ intel_flush_display_plane(dev_priv, pipe);
+ }
+}
+
+static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
+{
+ uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
+
+ reg &= ~GEN7_FF_SCHED_MASK;
+ reg |= GEN7_FF_TS_SCHED_HW;
+ reg |= GEN7_FF_VS_SCHED_HW;
+ reg |= GEN7_FF_DS_SCHED_HW;
+
+ I915_WRITE(GEN7_FF_THREAD_MODE, reg);
+}
+
+static void ivybridge_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+ uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+
+ I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
+ * This implements the WaDisableRCZUnitClockGating workaround.
+ */
+ I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
+
+ I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
+
+ I915_WRITE(IVB_CHICKEN3,
+ CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
+ CHICKEN3_DGMG_DONE_FIX_DISABLE);
+
+ /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
+ I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
+ GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
+
+ /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
+ I915_WRITE(GEN7_L3CNTLREG1,
+ GEN7_WA_FOR_GEN7_L3_CONTROL);
+ I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
+ GEN7_WA_L3_CHICKEN_MODE);
+
+ /* This is required by WaCatErrorRejectionIssue */
+ I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+ I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+ GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+
+ for_each_pipe(pipe) {
+ I915_WRITE(DSPCNTR(pipe),
+ I915_READ(DSPCNTR(pipe)) |
+ DISPPLANE_TRICKLE_FEED_DISABLE);
+ intel_flush_display_plane(dev_priv, pipe);
+ }
+
+ gen7_setup_fixed_func_scheduler(dev_priv);
+
+ /* WaDisable4x2SubspanOptimization */
+ I915_WRITE(CACHE_MODE_1,
+ _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+}
+
+static void valleyview_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+ uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+
+ I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+	/* According to the spec, bit 13 (RCZUNIT) must be set here as well,
+	 * as on IVB. This implements the WaDisableRCZUnitClockGating
+	 * workaround.
+	 */
+ I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
+
+ I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
+
+ I915_WRITE(IVB_CHICKEN3,
+ CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
+ CHICKEN3_DGMG_DONE_FIX_DISABLE);
+
+ /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
+ I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
+ GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
+
+ /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
+ I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
+ I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
+
+ /* This is required by WaCatErrorRejectionIssue */
+ I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+ I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+ GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+
+ for_each_pipe(pipe) {
+ I915_WRITE(DSPCNTR(pipe),
+ I915_READ(DSPCNTR(pipe)) |
+ DISPPLANE_TRICKLE_FEED_DISABLE);
+ intel_flush_display_plane(dev_priv, pipe);
+ }
+
+ I915_WRITE(CACHE_MODE_1,
+ _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+}
+
+static void g4x_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dspclk_gate;
+
+ I915_WRITE(RENCLK_GATE_D1, 0);
+ I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
+ GS_UNIT_CLOCK_GATE_DISABLE |
+ CL_UNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(RAMCLK_GATE_D, 0);
+ dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
+ OVRUNIT_CLOCK_GATE_DISABLE |
+ OVCUNIT_CLOCK_GATE_DISABLE;
+ if (IS_GM45(dev))
+ dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
+ I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
+}
+
+static void crestline_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
+ I915_WRITE(RENCLK_GATE_D2, 0);
+ I915_WRITE(DSPCLK_GATE_D, 0);
+ I915_WRITE(RAMCLK_GATE_D, 0);
+ I915_WRITE16(DEUC, 0);
+}
+
+static void broadwater_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
+ I965_RCC_CLOCK_GATE_DISABLE |
+ I965_RCPB_CLOCK_GATE_DISABLE |
+ I965_ISC_CLOCK_GATE_DISABLE |
+ I965_FBC_CLOCK_GATE_DISABLE);
+ I915_WRITE(RENCLK_GATE_D2, 0);
+}
+
+static void gen3_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dstate = I915_READ(D_STATE);
+
+ dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
+ DSTATE_DOT_CLOCK_GATING;
+ I915_WRITE(D_STATE, dstate);
+
+ if (IS_PINEVIEW(dev))
+ I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
+}
+
+static void i85x_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
+}
+
+static void i830_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
+}
+
+static void ibx_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /*
+ * On Ibex Peak and Cougar Point, we need to disable clock
+ * gating for the panel power sequencer or it will fail to
+ * start up when no ports are active.
+ */
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+}
+
+static void cpt_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+
+ /*
+ * On Ibex Peak and Cougar Point, we need to disable clock
+ * gating for the panel power sequencer or it will fail to
+ * start up when no ports are active.
+ */
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
+ DPLS_EDP_PPS_FIX_DIS);
+ /* Without this, mode sets may fail silently on FDI */
+ for_each_pipe(pipe)
+ I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
+}
+
+void intel_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->display.init_clock_gating(dev);
+
+ if (dev_priv->display.init_pch_clock_gating)
+ dev_priv->display.init_pch_clock_gating(dev);
+}
+
+static void gen6_sanitize_pm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 limits, delay, old;
+
+ gen6_gt_force_wake_get(dev_priv);
+
+ old = limits = I915_READ(GEN6_RP_INTERRUPT_LIMITS);
+ /* Make sure we continue to get interrupts
+ * until we hit the minimum or maximum frequencies.
+ */
+ limits &= ~(0x3f << 16 | 0x3f << 24);
+ delay = dev_priv->cur_delay;
+ if (delay < dev_priv->max_delay)
+ limits |= (dev_priv->max_delay & 0x3f) << 24;
+ if (delay > dev_priv->min_delay)
+ limits |= (dev_priv->min_delay & 0x3f) << 16;
+
+ if (old != limits) {
+ DRM_ERROR("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS expected %08x, was %08x\n",
+ limits, old);
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
+ }
+
+ gen6_gt_force_wake_put(dev_priv);
+}
+
+void intel_sanitize_pm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->display.sanitize_pm)
+ dev_priv->display.sanitize_pm(dev);
+}
+
+/* Starting with Haswell, we have different power wells for
+ * different parts of the GPU. This attempts to enable them all.
+ */
+void intel_init_power_wells(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long power_wells[] = {
+ HSW_PWR_WELL_CTL1,
+ HSW_PWR_WELL_CTL2,
+ HSW_PWR_WELL_CTL4
+ };
+ int i;
+
+ if (!IS_HASWELL(dev))
+ return;
+
+ mutex_lock(&dev->struct_mutex);
+
+ for (i = 0; i < ARRAY_SIZE(power_wells); i++) {
+ int well = I915_READ(power_wells[i]);
+
+ if ((well & HSW_PWR_WELL_STATE) == 0) {
+			I915_WRITE(power_wells[i], well | HSW_PWR_WELL_ENABLE);
+			if (wait_for(I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE, 20))
+ DRM_ERROR("Error enabling power well %lx\n", power_wells[i]);
+ }
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+}
+
+/* Set up chip specific power management-related functions */
+void intel_init_pm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (I915_HAS_FBC(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
+ dev_priv->display.enable_fbc = ironlake_enable_fbc;
+ dev_priv->display.disable_fbc = ironlake_disable_fbc;
+ } else if (IS_GM45(dev)) {
+ dev_priv->display.fbc_enabled = g4x_fbc_enabled;
+ dev_priv->display.enable_fbc = g4x_enable_fbc;
+ dev_priv->display.disable_fbc = g4x_disable_fbc;
+ } else if (IS_CRESTLINE(dev)) {
+ dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
+ dev_priv->display.enable_fbc = i8xx_enable_fbc;
+ dev_priv->display.disable_fbc = i8xx_disable_fbc;
+ }
+ /* 855GM needs testing */
+ }
+
+ /* For cxsr */
+ if (IS_PINEVIEW(dev))
+ i915_pineview_get_mem_freq(dev);
+ else if (IS_GEN5(dev))
+ i915_ironlake_get_mem_freq(dev);
+
+ /* For FIFO watermark updates */
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
+ dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
+
+ /* IVB configs may use multi-threaded forcewake */
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
+ u32 ecobus;
+
+			/* A small trick here: if the BIOS hasn't configured MT forcewake,
+			 * and the device is in RC6, then force_wake_mt_get will not wake
+			 * the device and the ECOBUS read will return zero, which the
+			 * test below (correctly) interprets as MT forcewake being
+			 * disabled.
+			 */
+ mutex_lock(&dev->struct_mutex);
+ __gen6_gt_force_wake_mt_get(dev_priv);
+ ecobus = I915_READ_NOTRACE(ECOBUS);
+ __gen6_gt_force_wake_mt_put(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ecobus & FORCEWAKE_MT_ENABLE) {
+ DRM_DEBUG_KMS("Using MT version of forcewake\n");
+ dev_priv->display.force_wake_get =
+ __gen6_gt_force_wake_mt_get;
+ dev_priv->display.force_wake_put =
+ __gen6_gt_force_wake_mt_put;
+ }
+ }
+
+ if (HAS_PCH_IBX(dev))
+ dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
+ else if (HAS_PCH_CPT(dev))
+ dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
+
+ if (IS_GEN5(dev)) {
+ if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
+ dev_priv->display.update_wm = ironlake_update_wm;
+ else {
+ DRM_DEBUG_KMS("Failed to get proper latency. "
+ "Disable CxSR\n");
+ dev_priv->display.update_wm = NULL;
+ }
+ dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
+ } else if (IS_GEN6(dev)) {
+ if (SNB_READ_WM0_LATENCY()) {
+ dev_priv->display.update_wm = sandybridge_update_wm;
+ dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
+ } else {
+ DRM_DEBUG_KMS("Failed to read display plane latency. "
+ "Disable CxSR\n");
+ dev_priv->display.update_wm = NULL;
+ }
+ dev_priv->display.init_clock_gating = gen6_init_clock_gating;
+ dev_priv->display.sanitize_pm = gen6_sanitize_pm;
+ } else if (IS_IVYBRIDGE(dev)) {
+ /* FIXME: detect B0+ stepping and use auto training */
+ if (SNB_READ_WM0_LATENCY()) {
+ dev_priv->display.update_wm = sandybridge_update_wm;
+ dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
+ } else {
+ DRM_DEBUG_KMS("Failed to read display plane latency. "
+ "Disable CxSR\n");
+ dev_priv->display.update_wm = NULL;
+ }
+ dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
+ dev_priv->display.sanitize_pm = gen6_sanitize_pm;
+ } else if (IS_HASWELL(dev)) {
+ if (SNB_READ_WM0_LATENCY()) {
+ dev_priv->display.update_wm = sandybridge_update_wm;
+ dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
+ dev_priv->display.update_linetime_wm = haswell_update_linetime_wm;
+ } else {
+ DRM_DEBUG_KMS("Failed to read display plane latency. "
+ "Disable CxSR\n");
+ dev_priv->display.update_wm = NULL;
+ }
+ dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
+ dev_priv->display.sanitize_pm = gen6_sanitize_pm;
+ } else
+ dev_priv->display.update_wm = NULL;
+ } else if (IS_VALLEYVIEW(dev)) {
+ dev_priv->display.update_wm = valleyview_update_wm;
+ dev_priv->display.init_clock_gating =
+ valleyview_init_clock_gating;
+ dev_priv->display.force_wake_get = vlv_force_wake_get;
+ dev_priv->display.force_wake_put = vlv_force_wake_put;
+ } else if (IS_PINEVIEW(dev)) {
+ if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
+ dev_priv->is_ddr3,
+ dev_priv->fsb_freq,
+ dev_priv->mem_freq)) {
+ DRM_INFO("failed to find known CxSR latency "
+ "(found ddr%s fsb freq %d, mem freq %d), "
+ "disabling CxSR\n",
+ (dev_priv->is_ddr3 == 1) ? "3" : "2",
+ dev_priv->fsb_freq, dev_priv->mem_freq);
+ /* Disable CxSR and never update its watermark again */
+ pineview_disable_cxsr(dev);
+ dev_priv->display.update_wm = NULL;
+ } else
+ dev_priv->display.update_wm = pineview_update_wm;
+ dev_priv->display.init_clock_gating = gen3_init_clock_gating;
+ } else if (IS_G4X(dev)) {
+ dev_priv->display.update_wm = g4x_update_wm;
+ dev_priv->display.init_clock_gating = g4x_init_clock_gating;
+ } else if (IS_GEN4(dev)) {
+ dev_priv->display.update_wm = i965_update_wm;
+ if (IS_CRESTLINE(dev))
+ dev_priv->display.init_clock_gating = crestline_init_clock_gating;
+ else if (IS_BROADWATER(dev))
+ dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
+ } else if (IS_GEN3(dev)) {
+ dev_priv->display.update_wm = i9xx_update_wm;
+ dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
+ dev_priv->display.init_clock_gating = gen3_init_clock_gating;
+ } else if (IS_I865G(dev)) {
+ dev_priv->display.update_wm = i830_update_wm;
+ dev_priv->display.init_clock_gating = i85x_init_clock_gating;
+ dev_priv->display.get_fifo_size = i830_get_fifo_size;
+ } else if (IS_I85X(dev)) {
+ dev_priv->display.update_wm = i9xx_update_wm;
+ dev_priv->display.get_fifo_size = i85x_get_fifo_size;
+ dev_priv->display.init_clock_gating = i85x_init_clock_gating;
+ } else {
+ dev_priv->display.update_wm = i830_update_wm;
+ dev_priv->display.init_clock_gating = i830_init_clock_gating;
+ if (IS_845G(dev))
+ dev_priv->display.get_fifo_size = i845_get_fifo_size;
+ else
+ dev_priv->display.get_fifo_size = i830_get_fifo_size;
+ }
+
+	/* We attempt to init the necessary power wells early during
+	 * initialization, so that subsystems which expect power to be enabled
+	 * can work.
+	 */
+ intel_init_power_wells(dev);
+}
+
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 62892a826ed..b59b6d5b758 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -53,9 +53,35 @@ static inline int ring_space(struct intel_ring_buffer *ring)
}
static int
-render_ring_flush(struct intel_ring_buffer *ring,
- u32 invalidate_domains,
- u32 flush_domains)
+gen2_render_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
+{
+ u32 cmd;
+ int ret;
+
+ cmd = MI_FLUSH;
+ if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
+ cmd |= MI_NO_WRITE_FLUSH;
+
+ if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+ cmd |= MI_READ_FLUSH;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static int
+gen4_render_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
{
struct drm_device *dev = ring->dev;
u32 cmd;
@@ -90,17 +116,8 @@ render_ring_flush(struct intel_ring_buffer *ring,
*/
cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
- if ((invalidate_domains|flush_domains) &
- I915_GEM_DOMAIN_RENDER)
+ if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
cmd &= ~MI_NO_WRITE_FLUSH;
- if (INTEL_INFO(dev)->gen < 4) {
- /*
- * On the 965, the sampler cache always gets flushed
- * and this bit is reserved.
- */
- if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
- cmd |= MI_READ_FLUSH;
- }
if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
cmd |= MI_EXE_FLUSH;
@@ -290,9 +307,9 @@ static int init_ring_common(struct intel_ring_buffer *ring)
| RING_VALID);
/* If the head is still not zero, the ring is dead */
- if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
- I915_READ_START(ring) != obj->gtt_offset ||
- (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
+ if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
+ I915_READ_START(ring) == obj->gtt_offset &&
+ (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
DRM_ERROR("%s initialization failed "
"ctl %08x head %08x tail %08x start %08x\n",
ring->name,
@@ -384,12 +401,11 @@ static int init_render_ring(struct intel_ring_buffer *ring)
int ret = init_ring_common(ring);
if (INTEL_INFO(dev)->gen > 3) {
- int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
- I915_WRITE(MI_MODE, mode);
+ I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
if (IS_GEN7(dev))
I915_WRITE(GFX_MODE_GEN7,
- GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
- GFX_MODE_ENABLE(GFX_REPLAY_MODE));
+ _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
+ _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
}
if (INTEL_INFO(dev)->gen >= 5) {
@@ -398,7 +414,6 @@ static int init_render_ring(struct intel_ring_buffer *ring)
return ret;
}
-
if (IS_GEN6(dev)) {
/* From the Sandybridge PRM, volume 1 part 3, page 24:
* "If this bit is set, STCunit will have LRA as replacement
@@ -406,13 +421,11 @@ static int init_render_ring(struct intel_ring_buffer *ring)
* policy is not supported."
*/
I915_WRITE(CACHE_MODE_0,
- CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT);
+ _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
}
- if (INTEL_INFO(dev)->gen >= 6) {
- I915_WRITE(INSTPM,
- INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
- }
+ if (INTEL_INFO(dev)->gen >= 6)
+ I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
return ret;
}
@@ -483,21 +496,30 @@ gen6_add_request(struct intel_ring_buffer *ring,
* @seqno - seqno which the waiter will block on
*/
static int
-intel_ring_sync(struct intel_ring_buffer *waiter,
- struct intel_ring_buffer *signaller,
- int ring,
- u32 seqno)
+gen6_ring_sync(struct intel_ring_buffer *waiter,
+ struct intel_ring_buffer *signaller,
+ u32 seqno)
{
int ret;
u32 dw1 = MI_SEMAPHORE_MBOX |
MI_SEMAPHORE_COMPARE |
MI_SEMAPHORE_REGISTER;
+	/* Throughout all of the GEM code, a passed seqno implies that our
+	 * current seqno is >= the last seqno executed. For the hardware,
+	 * however, the comparison is strictly greater than.
+	 */
+ seqno -= 1;
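+	/* Example (illustrative): a caller waiting for seqno 42 arms the
+	 * semaphore compare with 41, since the hardware test is strictly
+	 * greater-than. */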
+
+ WARN_ON(signaller->semaphore_register[waiter->id] ==
+ MI_SEMAPHORE_SYNC_INVALID);
+
ret = intel_ring_begin(waiter, 4);
if (ret)
return ret;
- intel_ring_emit(waiter, dw1 | signaller->semaphore_register[ring]);
+ intel_ring_emit(waiter,
+ dw1 | signaller->semaphore_register[waiter->id]);
intel_ring_emit(waiter, seqno);
intel_ring_emit(waiter, 0);
intel_ring_emit(waiter, MI_NOOP);
@@ -506,47 +528,6 @@ intel_ring_sync(struct intel_ring_buffer *waiter,
return 0;
}
-/* VCS->RCS (RVSYNC) or BCS->RCS (RBSYNC) */
-int
-render_ring_sync_to(struct intel_ring_buffer *waiter,
- struct intel_ring_buffer *signaller,
- u32 seqno)
-{
- WARN_ON(signaller->semaphore_register[RCS] == MI_SEMAPHORE_SYNC_INVALID);
- return intel_ring_sync(waiter,
- signaller,
- RCS,
- seqno);
-}
-
-/* RCS->VCS (VRSYNC) or BCS->VCS (VBSYNC) */
-int
-gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
- struct intel_ring_buffer *signaller,
- u32 seqno)
-{
- WARN_ON(signaller->semaphore_register[VCS] == MI_SEMAPHORE_SYNC_INVALID);
- return intel_ring_sync(waiter,
- signaller,
- VCS,
- seqno);
-}
-
-/* RCS->BCS (BRSYNC) or VCS->BCS (BVSYNC) */
-int
-gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
- struct intel_ring_buffer *signaller,
- u32 seqno)
-{
- WARN_ON(signaller->semaphore_register[BCS] == MI_SEMAPHORE_SYNC_INVALID);
- return intel_ring_sync(waiter,
- signaller,
- BCS,
- seqno);
-}
-
-
-
#define PIPE_CONTROL_FLUSH(ring__, addr__) \
do { \
intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
@@ -608,27 +589,6 @@ pc_render_add_request(struct intel_ring_buffer *ring,
return 0;
}
-static int
-render_ring_add_request(struct intel_ring_buffer *ring,
- u32 *result)
-{
- u32 seqno = i915_gem_next_request_seqno(ring);
- int ret;
-
- ret = intel_ring_begin(ring, 4);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
- intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring, seqno);
- intel_ring_emit(ring, MI_USER_INTERRUPT);
- intel_ring_advance(ring);
-
- *result = seqno;
- return 0;
-}
-
static u32
gen6_ring_get_seqno(struct intel_ring_buffer *ring)
{
@@ -655,76 +615,115 @@ pc_render_get_seqno(struct intel_ring_buffer *ring)
return pc->cpu_page[0];
}
-static void
-ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+static bool
+gen5_ring_get_irq(struct intel_ring_buffer *ring)
{
- dev_priv->gt_irq_mask &= ~mask;
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
- POSTING_READ(GTIMR);
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ if (!dev->irq_enabled)
+ return false;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (ring->irq_refcount++ == 0) {
+ dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+ return true;
}
static void
-ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+gen5_ring_put_irq(struct intel_ring_buffer *ring)
{
- dev_priv->gt_irq_mask |= mask;
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
- POSTING_READ(GTIMR);
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (--ring->irq_refcount == 0) {
+ dev_priv->gt_irq_mask |= ring->irq_enable_mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
-static void
-i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+static bool
+i9xx_ring_get_irq(struct intel_ring_buffer *ring)
{
- dev_priv->irq_mask &= ~mask;
- I915_WRITE(IMR, dev_priv->irq_mask);
- POSTING_READ(IMR);
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ if (!dev->irq_enabled)
+ return false;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (ring->irq_refcount++ == 0) {
+ dev_priv->irq_mask &= ~ring->irq_enable_mask;
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ POSTING_READ(IMR);
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+ return true;
}
static void
-i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+i9xx_ring_put_irq(struct intel_ring_buffer *ring)
{
- dev_priv->irq_mask |= mask;
- I915_WRITE(IMR, dev_priv->irq_mask);
- POSTING_READ(IMR);
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (--ring->irq_refcount == 0) {
+ dev_priv->irq_mask |= ring->irq_enable_mask;
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ POSTING_READ(IMR);
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
static bool
-render_ring_get_irq(struct intel_ring_buffer *ring)
+i8xx_ring_get_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
if (!dev->irq_enabled)
return false;
- spin_lock(&ring->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
- if (HAS_PCH_SPLIT(dev))
- ironlake_enable_irq(dev_priv,
- GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
- else
- i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
+ dev_priv->irq_mask &= ~ring->irq_enable_mask;
+ I915_WRITE16(IMR, dev_priv->irq_mask);
+ POSTING_READ16(IMR);
}
- spin_unlock(&ring->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
return true;
}
static void
-render_ring_put_irq(struct intel_ring_buffer *ring)
+i8xx_ring_put_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
- spin_lock(&ring->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
- if (HAS_PCH_SPLIT(dev))
- ironlake_disable_irq(dev_priv,
- GT_USER_INTERRUPT |
- GT_PIPE_NOTIFY);
- else
- i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
+ dev_priv->irq_mask |= ring->irq_enable_mask;
+ I915_WRITE16(IMR, dev_priv->irq_mask);
+ POSTING_READ16(IMR);
}
- spin_unlock(&ring->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
@@ -776,7 +775,7 @@ bsd_ring_flush(struct intel_ring_buffer *ring,
}
static int
-ring_add_request(struct intel_ring_buffer *ring,
+i9xx_add_request(struct intel_ring_buffer *ring,
u32 *result)
{
u32 seqno;
@@ -799,10 +798,11 @@ ring_add_request(struct intel_ring_buffer *ring,
}
static bool
-gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+gen6_ring_get_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
if (!dev->irq_enabled)
return false;
@@ -812,120 +812,87 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
* blt/bsd rings on ivb. */
gen6_gt_force_wake_get(dev_priv);
- spin_lock(&ring->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
- ring->irq_mask &= ~rflag;
- I915_WRITE_IMR(ring, ring->irq_mask);
- ironlake_enable_irq(dev_priv, gflag);
+ I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+ dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
}
- spin_unlock(&ring->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
return true;
}
static void
-gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+gen6_ring_put_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
- spin_lock(&ring->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
- ring->irq_mask |= rflag;
- I915_WRITE_IMR(ring, ring->irq_mask);
- ironlake_disable_irq(dev_priv, gflag);
+ I915_WRITE_IMR(ring, ~0);
+ dev_priv->gt_irq_mask |= ring->irq_enable_mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
}
- spin_unlock(&ring->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
gen6_gt_force_wake_put(dev_priv);
}
-static bool
-bsd_ring_get_irq(struct intel_ring_buffer *ring)
+static int
+i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
{
- struct drm_device *dev = ring->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- if (!dev->irq_enabled)
- return false;
+ int ret;
- spin_lock(&ring->irq_lock);
- if (ring->irq_refcount++ == 0) {
- if (IS_G4X(dev))
- i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
- else
- ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
- }
- spin_unlock(&ring->irq_lock);
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
- return true;
-}
-static void
-bsd_ring_put_irq(struct intel_ring_buffer *ring)
-{
- struct drm_device *dev = ring->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START |
+ MI_BATCH_GTT |
+ MI_BATCH_NON_SECURE_I965);
+ intel_ring_emit(ring, offset);
+ intel_ring_advance(ring);
- spin_lock(&ring->irq_lock);
- if (--ring->irq_refcount == 0) {
- if (IS_G4X(dev))
- i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
- else
- ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
- }
- spin_unlock(&ring->irq_lock);
+ return 0;
}
static int
-ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
+i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ u32 offset, u32 len)
{
int ret;
- ret = intel_ring_begin(ring, 2);
+ ret = intel_ring_begin(ring, 4);
if (ret)
return ret;
- intel_ring_emit(ring,
- MI_BATCH_BUFFER_START | (2 << 6) |
- MI_BATCH_NON_SECURE_I965);
- intel_ring_emit(ring, offset);
+ intel_ring_emit(ring, MI_BATCH_BUFFER);
+ intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
+ intel_ring_emit(ring, offset + len - 8);
+ intel_ring_emit(ring, 0);
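+	/* The second address is the end of the batch; it points 8 bytes
+	 * before offset + len, presumably at the last valid qword (e.g.
+	 * offset 0x10000, len 0x1000 -> 0x10ff8; illustrative values). */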
intel_ring_advance(ring);
return 0;
}
static int
-render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
u32 offset, u32 len)
{
- struct drm_device *dev = ring->dev;
int ret;
- if (IS_I830(dev) || IS_845G(dev)) {
- ret = intel_ring_begin(ring, 4);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_BATCH_BUFFER);
- intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
- intel_ring_emit(ring, offset + len - 8);
- intel_ring_emit(ring, 0);
- } else {
- ret = intel_ring_begin(ring, 2);
- if (ret)
- return ret;
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
- if (INTEL_INFO(dev)->gen >= 4) {
- intel_ring_emit(ring,
- MI_BATCH_BUFFER_START | (2 << 6) |
- MI_BATCH_NON_SECURE_I965);
- intel_ring_emit(ring, offset);
- } else {
- intel_ring_emit(ring,
- MI_BATCH_BUFFER_START | (2 << 6));
- intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
- }
- }
+ intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
+ intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
intel_ring_advance(ring);
return 0;
@@ -933,7 +900,6 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
static void cleanup_status_page(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_object *obj;
obj = ring->status_page.obj;
@@ -944,14 +910,11 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)
i915_gem_object_unpin(obj);
drm_gem_object_unreference(&obj->base);
ring->status_page.obj = NULL;
-
- memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}
static int init_status_page(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
int ret;
@@ -972,7 +935,6 @@ static int init_status_page(struct intel_ring_buffer *ring)
ring->status_page.gfx_addr = obj->gtt_offset;
ring->status_page.page_addr = kmap(obj->pages[0]);
if (ring->status_page.page_addr == NULL) {
- memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
goto err_unpin;
}
ring->status_page.obj = obj;
@@ -992,8 +954,8 @@ err:
return ret;
}
-int intel_init_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static int intel_init_ring_buffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
{
struct drm_i915_gem_object *obj;
int ret;
@@ -1002,10 +964,9 @@ int intel_init_ring_buffer(struct drm_device *dev,
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
INIT_LIST_HEAD(&ring->gpu_write_list);
+ ring->size = 32 * PAGE_SIZE;
init_waitqueue_head(&ring->irq_queue);
- spin_lock_init(&ring->irq_lock);
- ring->irq_mask = ~0;
if (I915_NEED_GFX_HWS(dev)) {
ret = init_status_page(ring);
@@ -1026,20 +987,14 @@ int intel_init_ring_buffer(struct drm_device *dev,
if (ret)
goto err_unref;
- ring->map.size = ring->size;
- ring->map.offset = dev->agp->base + obj->gtt_offset;
- ring->map.type = 0;
- ring->map.flags = 0;
- ring->map.mtrr = 0;
-
- drm_core_ioremap_wc(&ring->map, dev);
- if (ring->map.handle == NULL) {
+ ring->virtual_start = ioremap_wc(dev->agp->base + obj->gtt_offset,
+ ring->size);
+ if (ring->virtual_start == NULL) {
DRM_ERROR("Failed to map ringbuffer.\n");
ret = -EINVAL;
goto err_unpin;
}
- ring->virtual_start = ring->map.handle;
ret = ring->init(ring);
if (ret)
goto err_unmap;
@@ -1055,7 +1010,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
return 0;
err_unmap:
- drm_core_ioremapfree(&ring->map, dev);
+ iounmap(ring->virtual_start);
err_unpin:
i915_gem_object_unpin(obj);
err_unref:
@@ -1083,7 +1038,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
I915_WRITE_CTL(ring, 0);
- drm_core_ioremapfree(&ring->map, ring->dev);
+ iounmap(ring->virtual_start);
i915_gem_object_unpin(ring->obj);
drm_gem_object_unreference(&ring->obj->base);
@@ -1097,7 +1052,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
- unsigned int *virt;
+ uint32_t __iomem *virt;
int rem = ring->size - ring->tail;
if (ring->space < rem) {
@@ -1106,12 +1061,10 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
return ret;
}
- virt = (unsigned int *)(ring->virtual_start + ring->tail);
- rem /= 8;
- while (rem--) {
- *virt++ = MI_NOOP;
- *virt++ = MI_NOOP;
- }
+ virt = ring->virtual_start + ring->tail;
+ rem /= 4;
+ while (rem--)
+ iowrite32(MI_NOOP, virt++);
ring->tail = 0;
ring->space = ring_space(ring);
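
Note: intel_wrap_ring_buffer() now pads dword-by-dword through iowrite32() on the __iomem mapping instead of casting to a plain CPU pointer and writing NOOPs in pairs, which is why rem is divided by 4 rather than 8. A runnable userspace model of the wrap arithmetic, using a plain array in place of the __iomem mapping and assuming MI_NOOP encodes as 0:

    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 64          /* bytes; a power of two, as in the driver */
    #define MI_NOOP   0u          /* placeholder value for illustration */

    static uint32_t ring[RING_SIZE / 4];

    /* Fill from 'tail' to the end of the ring with NOOPs, then wrap to 0. */
    static unsigned int wrap(unsigned int tail)
    {
        int rem = RING_SIZE - tail;     /* bytes left before the wrap point */
        uint32_t *virt = ring + tail / 4;

        rem /= 4;                       /* one 32-bit write per dword */
        while (rem--)
            *virt++ = MI_NOOP;

        return 0;                       /* new tail */
    }

    int main(void)
    {
        unsigned int tail = wrap(52);   /* pads dwords 13..15, wraps tail */
        printf("tail after wrap: %u, ring[13]=%u\n", tail, ring[13]);
        return 0;
    }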
@@ -1132,9 +1085,11 @@ static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
was_interruptible = dev_priv->mm.interruptible;
dev_priv->mm.interruptible = false;
- ret = i915_wait_request(ring, seqno, true);
+ ret = i915_wait_request(ring, seqno);
dev_priv->mm.interruptible = was_interruptible;
+ if (!ret)
+ i915_gem_retire_requests_ring(ring);
return ret;
}
@@ -1208,15 +1163,12 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
return ret;
trace_i915_ring_wait_begin(ring);
- if (drm_core_check_feature(dev, DRIVER_GEM))
- /* With GEM the hangcheck timer should kick us out of the loop,
- * leaving it early runs the risk of corrupting GEM state (due
- * to running on almost untested codepaths). But on resume
- * timers don't work yet, so prevent a complete hang in that
- * case by choosing an insanely large timeout. */
- end = jiffies + 60 * HZ;
- else
- end = jiffies + 3 * HZ;
+ /* With GEM the hangcheck timer should kick us out of the loop;
+ * leaving it early runs the risk of corrupting GEM state (due
+ * to running on almost untested codepaths). But on resume
+ * timers don't work yet, so prevent a complete hang in that
+ * case by choosing an insanely large timeout. */

+ end = jiffies + 60 * HZ;
do {
ring->head = I915_READ_HEAD(ring);
@@ -1268,48 +1220,14 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
void intel_ring_advance(struct intel_ring_buffer *ring)
{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
ring->tail &= ring->size - 1;
+ if (dev_priv->stop_rings & intel_ring_flag(ring))
+ return;
ring->write_tail(ring, ring->tail);
}
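
Note: intel_ring_advance() now checks dev_priv->stop_rings, a per-device bitmask that lets test or error-handling code freeze individual rings: the software tail still wraps, but the hardware tail register is never written. A sketch of the gating logic, modeling intel_ring_flag() as 1 << id per the header change further down:

    #include <stdio.h>

    struct ring { unsigned int id, tail, size; };

    static unsigned int ring_flag(const struct ring *r) { return 1u << r->id; }

    static unsigned int stop_rings;      /* stand-in for dev_priv->stop_rings */

    static void advance(struct ring *r)
    {
        r->tail &= r->size - 1;          /* size is a power of two */
        if (stop_rings & ring_flag(r))
            return;                      /* ring frozen: don't tell the hw */
        printf("write_tail(0x%x)\n", r->tail);
    }

    int main(void)
    {
        struct ring rcs = { .id = 0, .tail = 0x40, .size = 0x1000 };
        advance(&rcs);                   /* tail reaches the hardware */
        stop_rings = ring_flag(&rcs);
        advance(&rcs);                   /* suppressed */
        return 0;
    }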
-static const struct intel_ring_buffer render_ring = {
- .name = "render ring",
- .id = RCS,
- .mmio_base = RENDER_RING_BASE,
- .size = 32 * PAGE_SIZE,
- .init = init_render_ring,
- .write_tail = ring_write_tail,
- .flush = render_ring_flush,
- .add_request = render_ring_add_request,
- .get_seqno = ring_get_seqno,
- .irq_get = render_ring_get_irq,
- .irq_put = render_ring_put_irq,
- .dispatch_execbuffer = render_ring_dispatch_execbuffer,
- .cleanup = render_ring_cleanup,
- .sync_to = render_ring_sync_to,
- .semaphore_register = {MI_SEMAPHORE_SYNC_INVALID,
- MI_SEMAPHORE_SYNC_RV,
- MI_SEMAPHORE_SYNC_RB},
- .signal_mbox = {GEN6_VRSYNC, GEN6_BRSYNC},
-};
-
-/* ring buffer for bit-stream decoder */
-
-static const struct intel_ring_buffer bsd_ring = {
- .name = "bsd ring",
- .id = VCS,
- .mmio_base = BSD_RING_BASE,
- .size = 32 * PAGE_SIZE,
- .init = init_ring_common,
- .write_tail = ring_write_tail,
- .flush = bsd_ring_flush,
- .add_request = ring_add_request,
- .get_seqno = ring_get_seqno,
- .irq_get = bsd_ring_get_irq,
- .irq_put = bsd_ring_put_irq,
- .dispatch_execbuffer = ring_dispatch_execbuffer,
-};
-
static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
u32 value)
@@ -1372,77 +1290,8 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
return 0;
}
-static bool
-gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
-{
- return gen6_ring_get_irq(ring,
- GT_USER_INTERRUPT,
- GEN6_RENDER_USER_INTERRUPT);
-}
-
-static void
-gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
-{
- return gen6_ring_put_irq(ring,
- GT_USER_INTERRUPT,
- GEN6_RENDER_USER_INTERRUPT);
-}
-
-static bool
-gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
-{
- return gen6_ring_get_irq(ring,
- GT_GEN6_BSD_USER_INTERRUPT,
- GEN6_BSD_USER_INTERRUPT);
-}
-
-static void
-gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
-{
- return gen6_ring_put_irq(ring,
- GT_GEN6_BSD_USER_INTERRUPT,
- GEN6_BSD_USER_INTERRUPT);
-}
-
-/* ring buffer for Video Codec for Gen6+ */
-static const struct intel_ring_buffer gen6_bsd_ring = {
- .name = "gen6 bsd ring",
- .id = VCS,
- .mmio_base = GEN6_BSD_RING_BASE,
- .size = 32 * PAGE_SIZE,
- .init = init_ring_common,
- .write_tail = gen6_bsd_ring_write_tail,
- .flush = gen6_ring_flush,
- .add_request = gen6_add_request,
- .get_seqno = gen6_ring_get_seqno,
- .irq_get = gen6_bsd_ring_get_irq,
- .irq_put = gen6_bsd_ring_put_irq,
- .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
- .sync_to = gen6_bsd_ring_sync_to,
- .semaphore_register = {MI_SEMAPHORE_SYNC_VR,
- MI_SEMAPHORE_SYNC_INVALID,
- MI_SEMAPHORE_SYNC_VB},
- .signal_mbox = {GEN6_RVSYNC, GEN6_BVSYNC},
-};
-
/* Blitter support (SandyBridge+) */
-static bool
-blt_ring_get_irq(struct intel_ring_buffer *ring)
-{
- return gen6_ring_get_irq(ring,
- GT_BLT_USER_INTERRUPT,
- GEN6_BLITTER_USER_INTERRUPT);
-}
-
-static void
-blt_ring_put_irq(struct intel_ring_buffer *ring)
-{
- gen6_ring_put_irq(ring,
- GT_BLT_USER_INTERRUPT,
- GEN6_BLITTER_USER_INTERRUPT);
-}
-
static int blt_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate, u32 flush)
{
@@ -1464,42 +1313,63 @@ static int blt_ring_flush(struct intel_ring_buffer *ring,
return 0;
}
-static const struct intel_ring_buffer gen6_blt_ring = {
- .name = "blt ring",
- .id = BCS,
- .mmio_base = BLT_RING_BASE,
- .size = 32 * PAGE_SIZE,
- .init = init_ring_common,
- .write_tail = ring_write_tail,
- .flush = blt_ring_flush,
- .add_request = gen6_add_request,
- .get_seqno = gen6_ring_get_seqno,
- .irq_get = blt_ring_get_irq,
- .irq_put = blt_ring_put_irq,
- .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
- .sync_to = gen6_blt_ring_sync_to,
- .semaphore_register = {MI_SEMAPHORE_SYNC_BR,
- MI_SEMAPHORE_SYNC_BV,
- MI_SEMAPHORE_SYNC_INVALID},
- .signal_mbox = {GEN6_RBSYNC, GEN6_VBSYNC},
-};
-
int intel_init_render_ring_buffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
- *ring = render_ring;
+ ring->name = "render ring";
+ ring->id = RCS;
+ ring->mmio_base = RENDER_RING_BASE;
+
if (INTEL_INFO(dev)->gen >= 6) {
ring->add_request = gen6_add_request;
ring->flush = gen6_render_ring_flush;
- ring->irq_get = gen6_render_ring_get_irq;
- ring->irq_put = gen6_render_ring_put_irq;
+ ring->irq_get = gen6_ring_get_irq;
+ ring->irq_put = gen6_ring_put_irq;
+ ring->irq_enable_mask = GT_USER_INTERRUPT;
ring->get_seqno = gen6_ring_get_seqno;
+ ring->sync_to = gen6_ring_sync;
+ ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
+ ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB;
+ ring->signal_mbox[0] = GEN6_VRSYNC;
+ ring->signal_mbox[1] = GEN6_BRSYNC;
} else if (IS_GEN5(dev)) {
ring->add_request = pc_render_add_request;
+ ring->flush = gen4_render_ring_flush;
ring->get_seqno = pc_render_get_seqno;
+ ring->irq_get = gen5_ring_get_irq;
+ ring->irq_put = gen5_ring_put_irq;
+ ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
+ } else {
+ ring->add_request = i9xx_add_request;
+ if (INTEL_INFO(dev)->gen < 4)
+ ring->flush = gen2_render_ring_flush;
+ else
+ ring->flush = gen4_render_ring_flush;
+ ring->get_seqno = ring_get_seqno;
+ if (IS_GEN2(dev)) {
+ ring->irq_get = i8xx_ring_get_irq;
+ ring->irq_put = i8xx_ring_put_irq;
+ } else {
+ ring->irq_get = i9xx_ring_get_irq;
+ ring->irq_put = i9xx_ring_put_irq;
+ }
+ ring->irq_enable_mask = I915_USER_INTERRUPT;
}
+ ring->write_tail = ring_write_tail;
+ if (INTEL_INFO(dev)->gen >= 6)
+ ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ else if (INTEL_INFO(dev)->gen >= 4)
+ ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+ else if (IS_I830(dev) || IS_845G(dev))
+ ring->dispatch_execbuffer = i830_dispatch_execbuffer;
+ else
+ ring->dispatch_execbuffer = i915_dispatch_execbuffer;
+ ring->init = init_render_ring;
+ ring->cleanup = render_ring_cleanup;
+
if (!I915_NEED_GFX_HWS(dev)) {
ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
@@ -1514,15 +1384,41 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
- *ring = render_ring;
+ ring->name = "render ring";
+ ring->id = RCS;
+ ring->mmio_base = RENDER_RING_BASE;
+
if (INTEL_INFO(dev)->gen >= 6) {
- ring->add_request = gen6_add_request;
- ring->irq_get = gen6_render_ring_get_irq;
- ring->irq_put = gen6_render_ring_put_irq;
- } else if (IS_GEN5(dev)) {
- ring->add_request = pc_render_add_request;
- ring->get_seqno = pc_render_get_seqno;
+ /* non-kms not supported on gen6+ */
+ return -ENODEV;
+ }
+
+ /* Note: gem is not supported on gen5/ilk without kms (the corresponding
+ * gem_init ioctl returns -ENODEV). Hence we do not need to set up
+ * the special gen5 functions. */
+ ring->add_request = i9xx_add_request;
+ if (INTEL_INFO(dev)->gen < 4)
+ ring->flush = gen2_render_ring_flush;
+ else
+ ring->flush = gen4_render_ring_flush;
+ ring->get_seqno = ring_get_seqno;
+ if (IS_GEN2(dev)) {
+ ring->irq_get = i8xx_ring_get_irq;
+ ring->irq_put = i8xx_ring_put_irq;
+ } else {
+ ring->irq_get = i9xx_ring_get_irq;
+ ring->irq_put = i9xx_ring_put_irq;
}
+ ring->irq_enable_mask = I915_USER_INTERRUPT;
+ ring->write_tail = ring_write_tail;
+ if (INTEL_INFO(dev)->gen >= 4)
+ ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+ else if (IS_I830(dev) || IS_845G(dev))
+ ring->dispatch_execbuffer = i830_dispatch_execbuffer;
+ else
+ ring->dispatch_execbuffer = i915_dispatch_execbuffer;
+ ring->init = init_render_ring;
+ ring->cleanup = render_ring_cleanup;
if (!I915_NEED_GFX_HWS(dev))
ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
@@ -1537,20 +1433,13 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
if (IS_I830(ring->dev))
ring->effective_size -= 128;
- ring->map.offset = start;
- ring->map.size = size;
- ring->map.type = 0;
- ring->map.flags = 0;
- ring->map.mtrr = 0;
-
- drm_core_ioremap_wc(&ring->map, dev);
- if (ring->map.handle == NULL) {
+ ring->virtual_start = ioremap_wc(start, size);
+ if (ring->virtual_start == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return -ENOMEM;
}
- ring->virtual_start = (void __force __iomem *)ring->map.handle;
return 0;
}
@@ -1559,10 +1448,46 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
- if (IS_GEN6(dev) || IS_GEN7(dev))
- *ring = gen6_bsd_ring;
- else
- *ring = bsd_ring;
+ ring->name = "bsd ring";
+ ring->id = VCS;
+
+ ring->write_tail = ring_write_tail;
+ if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ ring->mmio_base = GEN6_BSD_RING_BASE;
+ /* gen6 bsd needs a special workaround for tail updates */
+ if (IS_GEN6(dev))
+ ring->write_tail = gen6_bsd_ring_write_tail;
+ ring->flush = gen6_ring_flush;
+ ring->add_request = gen6_add_request;
+ ring->get_seqno = gen6_ring_get_seqno;
+ ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
+ ring->irq_get = gen6_ring_get_irq;
+ ring->irq_put = gen6_ring_put_irq;
+ ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ ring->sync_to = gen6_ring_sync;
+ ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR;
+ ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB;
+ ring->signal_mbox[0] = GEN6_RVSYNC;
+ ring->signal_mbox[1] = GEN6_BVSYNC;
+ } else {
+ ring->mmio_base = BSD_RING_BASE;
+ ring->flush = bsd_ring_flush;
+ ring->add_request = i9xx_add_request;
+ ring->get_seqno = ring_get_seqno;
+ if (IS_GEN5(dev)) {
+ ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+ ring->irq_get = gen5_ring_get_irq;
+ ring->irq_put = gen5_ring_put_irq;
+ } else {
+ ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
+ ring->irq_get = i9xx_ring_get_irq;
+ ring->irq_put = i9xx_ring_put_irq;
+ }
+ ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+ }
+ ring->init = init_ring_common;
+
return intel_init_ring_buffer(dev, ring);
}
@@ -1572,7 +1497,25 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
- *ring = gen6_blt_ring;
+ ring->name = "blitter ring";
+ ring->id = BCS;
+
+ ring->mmio_base = BLT_RING_BASE;
+ ring->write_tail = ring_write_tail;
+ ring->flush = blt_ring_flush;
+ ring->add_request = gen6_add_request;
+ ring->get_seqno = gen6_ring_get_seqno;
+ ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
+ ring->irq_get = gen6_ring_get_irq;
+ ring->irq_put = gen6_ring_put_irq;
+ ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ ring->sync_to = gen6_ring_sync;
+ ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR;
+ ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV;
+ ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->signal_mbox[0] = GEN6_RBSYNC;
+ ring->signal_mbox[1] = GEN6_VBSYNC;
+ ring->init = init_ring_common;
return intel_init_ring_buffer(dev, ring);
}
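
Note: all three init functions now build the ring structure field by field instead of copying one of the static const templates this series deletes. A compressed illustration of the two styles (the ops struct and hooks here are stand-ins):

    #include <stdio.h>

    struct ops { const char *name; void (*flush)(void); };

    static void gen6_flush(void)   { puts("gen6 flush"); }
    static void legacy_flush(void) { puts("legacy flush"); }

    /* New style, as in the diff: build the struct explicitly per device,
     * so every generation check is visible at the point of assignment. */
    static void init_bsd(struct ops *r, int gen)
    {
        r->name = "bsd ring";
        r->flush = (gen >= 6) ? gen6_flush : legacy_flush;
    }

    int main(void)
    {
        struct ops bsd;
        init_bsd(&bsd, 6);
        bsd.flush();
        return 0;
    }

Explicit init keeps the per-generation decisions in one if/else chain instead of scattering them between a template copy and post-copy patch-ups.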
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index bc0365b8fa4..55d3da26bae 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -2,7 +2,7 @@
#define _INTEL_RINGBUFFER_H_
struct intel_hw_status_page {
- u32 __iomem *page_addr;
+ u32 *page_addr;
unsigned int gfx_addr;
struct drm_i915_gem_object *obj;
};
@@ -56,12 +56,9 @@ struct intel_ring_buffer {
*/
u32 last_retired_head;
- spinlock_t irq_lock;
- u32 irq_refcount;
- u32 irq_mask;
- u32 irq_seqno; /* last seq seem at irq time */
+ u32 irq_refcount; /* protected by dev_priv->irq_lock */
+ u32 irq_enable_mask; /* bitmask to enable ring interrupt */
u32 trace_irq_seqno;
- u32 waiting_seqno;
u32 sync_seqno[I915_NUM_RINGS-1];
bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
void (*irq_put)(struct intel_ring_buffer *ring);
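
Note: the per-ring spinlock and irq_mask are gone; irq_refcount is now protected by the device-wide dev_priv->irq_lock, and each ring carries only the bitmask needed to enable its interrupt. A userspace sketch of the refcounted enable/disable pattern, with a pthread mutex standing in for the kernel spinlock:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ dev_priv->irq_lock */
    static unsigned int irq_refcount;

    static bool irq_get(unsigned int enable_mask)
    {
        pthread_mutex_lock(&irq_lock);
        if (irq_refcount++ == 0)
            printf("unmask 0x%x in hardware\n", enable_mask);
        pthread_mutex_unlock(&irq_lock);
        return true;
    }

    static void irq_put(unsigned int enable_mask)
    {
        pthread_mutex_lock(&irq_lock);
        if (--irq_refcount == 0)
            printf("mask 0x%x in hardware\n", enable_mask);
        pthread_mutex_unlock(&irq_lock);
    }

    int main(void)
    {
        irq_get(0x2); irq_get(0x2);   /* only the first get touches hw */
        irq_put(0x2); irq_put(0x2);   /* only the last put touches hw */
        return 0;
    }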
@@ -118,11 +115,16 @@ struct intel_ring_buffer {
u32 outstanding_lazy_request;
wait_queue_head_t irq_queue;
- drm_local_map_t map;
void *private;
};
+static inline bool
+intel_ring_initialized(struct intel_ring_buffer *ring)
+{
+ return ring->obj != NULL;
+}
+
static inline unsigned
intel_ring_flag(struct intel_ring_buffer *ring)
{
@@ -152,7 +154,9 @@ static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
int reg)
{
- return ioread32(ring->status_page.page_addr + reg);
+ /* Ensure that the compiler doesn't optimize away the load. */
+ barrier();
+ return ring->status_page.page_addr[reg];
}
/**
@@ -170,10 +174,7 @@ intel_read_status_page(struct intel_ring_buffer *ring,
*
* The area from dword 0x20 to 0x3ff is available for driver usage.
*/
-#define READ_HWSP(dev_priv, reg) intel_read_status_page(LP_RING(dev_priv), reg)
-#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_GEM_HWS_INDEX 0x20
-#define I915_BREADCRUMB_INDEX 0x21
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
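
Note: intel_read_status_page() drops ioread32() because the status page is now reached through a kmap()'d kernel mapping rather than an __iomem one, so a compiler barrier is enough to force a fresh load when polling. A runnable sketch of the idea, with barrier() defined the way the kernel's gcc version expands, and index 0x20 matching I915_GEM_HWS_INDEX:

    #include <stdint.h>
    #include <stdio.h>

    /* Compiler-only fence, as the kernel's barrier() expands to on gcc. */
    #define barrier() __asm__ __volatile__("" ::: "memory")

    static uint32_t status_page[64];   /* written by "the GPU" elsewhere */

    static uint32_t read_status(int reg)
    {
        /* Without the fence the compiler may cache status_page[reg]
         * across calls in a polling loop; the fence forces a reload. */
        barrier();
        return status_page[reg];
    }

    int main(void)
    {
        status_page[0x20] = 42;        /* pretend the GPU wrote a seqno */
        printf("seqno: %u\n", read_status(0x20));
        return 0;
    }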
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index ae5e748f39b..a949b73880c 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -41,7 +41,7 @@
#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
-#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0)
+#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_YPRPB0)
#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
SDVO_TV_MASK)
@@ -74,7 +74,7 @@ struct intel_sdvo {
struct i2c_adapter ddc;
/* Register for the SDVO device: SDVOB or SDVOC */
- int sdvo_reg;
+ uint32_t sdvo_reg;
/* Active outputs controlled by this SDVO output */
uint16_t controlled_output;
@@ -114,6 +114,9 @@ struct intel_sdvo {
*/
bool is_tv;
+ /* On different gens SDVOB sits at a different register address. */
+ bool is_sdvob;
+
/* This is for current tv format name */
int tv_format_index;
@@ -403,8 +406,7 @@ static const struct _sdvo_cmd_name {
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
};
-#define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB)
-#define SDVO_NAME(svdo) (IS_SDVOB((svdo)->sdvo_reg) ? "SDVOB" : "SDVOC")
+#define SDVO_NAME(svdo) ((svdo)->is_sdvob ? "SDVOB" : "SDVOC")
static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
const void *args, int args_len)
@@ -441,9 +443,17 @@ static const char *cmd_status_names[] = {
static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
const void *args, int args_len)
{
- u8 buf[args_len*2 + 2], status;
- struct i2c_msg msgs[args_len + 3];
- int i, ret;
+ u8 *buf, status;
+ struct i2c_msg *msgs;
+ int i, ret = true;
+
+ buf = kzalloc(args_len * 2 + 2, GFP_KERNEL);
+ if (!buf)
+ return false;
+
+ msgs = kcalloc(args_len + 3, sizeof(*msgs), GFP_KERNEL);
+ if (!msgs) {
+ kfree(buf);
+ return false;
+ }
intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
@@ -477,15 +487,19 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
ret = i2c_transfer(intel_sdvo->i2c, msgs, i+3);
if (ret < 0) {
DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
- return false;
+ ret = false;
+ goto out;
}
if (ret != i+3) {
/* failure in I2C transfer */
DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3);
- return false;
+ ret = false;
}
- return true;
+out:
+ kfree(msgs);
+ kfree(buf);
+ return ret;
}
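
Note: the hunk above replaces two variable-length arrays on the stack with heap allocations, which means every exit path must now unwind both buffers (including the case where only the first allocation succeeds). A self-contained sketch of the goto-out cleanup pattern, with calloc/free standing in for kzalloc/kcalloc/kfree:

    #include <stdbool.h>
    #include <stdlib.h>

    /* Allocate two buffers; unwind both on every exit path. */
    static bool send_cmd(const void *args, int args_len)
    {
        bool ret = false;
        unsigned char *buf = calloc(1, (size_t)args_len * 2 + 2);
        void *msgs = calloc((size_t)args_len + 3, sizeof(void *));

        if (!buf || !msgs)
            goto out;                  /* free(NULL) is safe below */

        /* ... build messages and run the transfer ... */
        (void)args;
        ret = true;
    out:
        free(msgs);
        free(buf);
        return ret;
    }

    int main(void) { return send_cmd("hi", 2) ? 0 : 1; }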
static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
@@ -733,18 +747,18 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
uint16_t h_sync_offset, v_sync_offset;
int mode_clock;
- width = mode->crtc_hdisplay;
- height = mode->crtc_vdisplay;
+ width = mode->hdisplay;
+ height = mode->vdisplay;
/* do some mode translations */
- h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
- h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
+ h_blank_len = mode->htotal - mode->hdisplay;
+ h_sync_len = mode->hsync_end - mode->hsync_start;
- v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
- v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ v_blank_len = mode->vtotal - mode->vdisplay;
+ v_sync_len = mode->vsync_end - mode->vsync_start;
- h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
- v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
+ h_sync_offset = mode->hsync_start - mode->hdisplay;
+ v_sync_offset = mode->vsync_start - mode->vdisplay;
mode_clock = mode->clock;
mode_clock /= intel_mode_get_pixel_multiplier(mode) ?: 1;
@@ -873,17 +887,24 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
};
uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
uint8_t set_buf_index[2] = { 1, 0 };
- uint64_t *data = (uint64_t *)&avi_if;
+ uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];
+ uint64_t *data = (uint64_t *)sdvo_data;
unsigned i;
intel_dip_infoframe_csum(&avi_if);
+ /* The SDVO spec says the ECC is computed by the hardware, and it
+ * looks like we must not send the ECC field ourselves. */
+ memcpy(sdvo_data, &avi_if, 3);
+ sdvo_data[3] = avi_if.checksum;
+ memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi));
+
if (!intel_sdvo_set_value(intel_sdvo,
SDVO_CMD_SET_HBUF_INDEX,
set_buf_index, 2))
return false;
- for (i = 0; i < sizeof(avi_if); i += 8) {
+ for (i = 0; i < sizeof(sdvo_data); i += 8) {
if (!intel_sdvo_set_value(intel_sdvo,
SDVO_CMD_SET_HBUF_DATA,
data, 8))
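
Note: the infoframe is now staged into a separate sdvo_data buffer that omits the ECC byte, since the SDVO hardware computes the ECC itself: three header bytes, then the checksum, then the body. A runnable sketch of the repacking, with an assumed field layout — sizes and values here are illustrative, not the driver's actual struct dip_infoframe:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    /* Illustrative layout: 3 header bytes, the ECC byte the hardware
     * computes itself (must NOT be sent over SDVO), a checksum, payload. */
    struct infoframe {
        uint8_t type, ver, len;
        uint8_t ecc;                    /* skipped on the wire */
        uint8_t checksum;
        uint8_t body[13];
    };

    int main(void)
    {
        struct infoframe avi = { .type = 0x82, .ver = 2, .len = 13,
                                 .checksum = 0x5a };
        uint8_t out[4 + sizeof(avi.body)];

        memcpy(out, &avi, 3);               /* header, minus the ecc byte */
        out[3] = avi.checksum;              /* checksum follows directly  */
        memcpy(&out[4], avi.body, sizeof(avi.body));

        printf("wire bytes: %zu\n", sizeof(out));
        return 0;
    }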
@@ -1260,10 +1281,11 @@ intel_sdvo_get_analog_edid(struct drm_connector *connector)
struct drm_i915_private *dev_priv = connector->dev->dev_private;
return drm_get_edid(connector,
- &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+ intel_gmbus_get_adapter(dev_priv,
+ dev_priv->crt_ddc_pin));
}
-enum drm_connector_status
+static enum drm_connector_status
intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
{
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
@@ -1349,8 +1371,7 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
return connector_status_unknown;
/* add 30ms delay when the output type might be TV */
- if (intel_sdvo->caps.output_flags &
- (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0))
+ if (intel_sdvo->caps.output_flags & SDVO_TV_MASK)
mdelay(30);
if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
@@ -1570,9 +1591,6 @@ end:
intel_sdvo->sdvo_lvds_fixed_mode =
drm_mode_duplicate(connector->dev, newmode);
- drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode,
- 0);
-
intel_sdvo->is_lvds = true;
break;
}
@@ -1901,7 +1919,7 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
{
struct sdvo_device_mapping *mapping;
- if (IS_SDVOB(reg))
+ if (sdvo->is_sdvob)
mapping = &(dev_priv->sdvo_mappings[0]);
else
mapping = &(dev_priv->sdvo_mappings[1]);
@@ -1919,7 +1937,7 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
struct sdvo_device_mapping *mapping;
u8 pin;
- if (IS_SDVOB(reg))
+ if (sdvo->is_sdvob)
mapping = &dev_priv->sdvo_mappings[0];
else
mapping = &dev_priv->sdvo_mappings[1];
@@ -1928,12 +1946,12 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
if (mapping->initialized)
pin = mapping->i2c_pin;
- if (pin < GMBUS_NUM_PORTS) {
- sdvo->i2c = &dev_priv->gmbus[pin].adapter;
+ if (intel_gmbus_is_port_valid(pin)) {
+ sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ);
intel_gmbus_force_bit(sdvo->i2c, true);
} else {
- sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter;
+ sdvo->i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
}
}
@@ -1944,12 +1962,12 @@ intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
}
static u8
-intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
+intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct sdvo_device_mapping *my_mapping, *other_mapping;
- if (IS_SDVOB(sdvo_reg)) {
+ if (sdvo->is_sdvob) {
my_mapping = &dev_priv->sdvo_mappings[0];
other_mapping = &dev_priv->sdvo_mappings[1];
} else {
@@ -1974,7 +1992,7 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
/* No SDVO device info is found for another DVO port,
* so use mapping assumption we had before BIOS parsing.
*/
- if (IS_SDVOB(sdvo_reg))
+ if (sdvo->is_sdvob)
return 0x70;
else
return 0x72;
@@ -2199,6 +2217,10 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_CVBS0))
return false;
+ if (flags & SDVO_OUTPUT_YPRPB0)
+ if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_YPRPB0))
+ return false;
+
if (flags & SDVO_OUTPUT_RGB0)
if (!intel_sdvo_analog_init(intel_sdvo, 0))
return false;
@@ -2490,7 +2512,7 @@ intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
return i2c_add_adapter(&sdvo->ddc) == 0;
}
-bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
+bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
@@ -2502,7 +2524,8 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
return false;
intel_sdvo->sdvo_reg = sdvo_reg;
- intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
+ intel_sdvo->is_sdvob = is_sdvob;
+ intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) {
kfree(intel_sdvo);
@@ -2519,13 +2542,13 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
u8 byte;
if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) {
- DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
- IS_SDVOB(sdvo_reg) ? 'B' : 'C');
+ DRM_DEBUG_KMS("No SDVO device found on %s\n",
+ SDVO_NAME(intel_sdvo));
goto err;
}
}
- if (IS_SDVOB(sdvo_reg))
+ if (intel_sdvo->is_sdvob)
dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
else
dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
@@ -2546,8 +2569,8 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
if (intel_sdvo_output_setup(intel_sdvo,
intel_sdvo->caps.output_flags) != true) {
- DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
- IS_SDVOB(sdvo_reg) ? 'B' : 'C');
+ DRM_DEBUG_KMS("SDVO output failed to setup on %s\n",
+ SDVO_NAME(intel_sdvo));
goto err;
}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index e90dfb625c4..2a20fb0781d 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -110,14 +110,18 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
* when scaling is disabled.
*/
if (crtc_w != src_w || crtc_h != src_h) {
- dev_priv->sprite_scaling_enabled = true;
- sandybridge_update_wm(dev);
- intel_wait_for_vblank(dev, pipe);
+ if (!dev_priv->sprite_scaling_enabled) {
+ dev_priv->sprite_scaling_enabled = true;
+ intel_update_watermarks(dev);
+ intel_wait_for_vblank(dev, pipe);
+ }
sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
} else {
- dev_priv->sprite_scaling_enabled = false;
- /* potentially re-enable LP watermarks */
- sandybridge_update_wm(dev);
+ if (dev_priv->sprite_scaling_enabled) {
+ dev_priv->sprite_scaling_enabled = false;
+ /* potentially re-enable LP watermarks */
+ intel_update_watermarks(dev);
+ }
}
I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
@@ -133,7 +137,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
I915_WRITE(SPRSCALE(pipe), sprscale);
I915_WRITE(SPRCTL(pipe), sprctl);
- I915_WRITE(SPRSURF(pipe), obj->gtt_offset);
+ I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset);
POSTING_READ(SPRSURF(pipe));
}
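
Note: ivb_update_plane() now treats sprite scaling as edge-triggered state: watermarks are recomputed (and a vblank wait paid) only when scaling actually turns on or off, not on every plane update. A compact model of that pattern:

    #include <stdbool.h>
    #include <stdio.h>

    static bool scaling_enabled;   /* ~ dev_priv->sprite_scaling_enabled */

    static void update_watermarks(void) { puts("recompute watermarks"); }
    static void wait_for_vblank(void)   { puts("wait for vblank"); }

    static void set_scaling(bool want)
    {
        /* Only touch watermarks (and stall for vblank when enabling)
         * on a state change, never on a steady-state plane update. */
        if (want == scaling_enabled)
            return;
        scaling_enabled = want;
        update_watermarks();
        if (want)
            wait_for_vblank();
    }

    int main(void)
    {
        set_scaling(true);    /* transition: pays the cost */
        set_scaling(true);    /* no-op */
        set_scaling(false);   /* transition back */
        return 0;
    }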
@@ -149,8 +153,11 @@ ivb_disable_plane(struct drm_plane *plane)
/* Can't leave the scaler enabled... */
I915_WRITE(SPRSCALE(pipe), 0);
/* Activate double buffered register update */
- I915_WRITE(SPRSURF(pipe), 0);
+ I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
POSTING_READ(SPRSURF(pipe));
+
+ dev_priv->sprite_scaling_enabled = false;
+ intel_update_watermarks(dev);
}
static int
@@ -208,7 +215,7 @@ ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
}
static void
-snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
+ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
@@ -218,7 +225,7 @@ snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
int pipe = intel_plane->pipe, pixel_size;
- u32 dvscntr, dvsscale = 0;
+ u32 dvscntr, dvsscale;
dvscntr = I915_READ(DVSCNTR(pipe));
@@ -262,8 +269,8 @@ snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
if (obj->tiling_mode != I915_TILING_NONE)
dvscntr |= DVS_TILED;
- /* must disable */
- dvscntr |= DVS_TRICKLE_FEED_DISABLE;
+ if (IS_GEN6(dev))
+ dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
dvscntr |= DVS_ENABLE;
/* Sizes are 0 based */
@@ -274,7 +281,8 @@ snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
- if (crtc_w != src_w || crtc_h != src_h)
+ dvsscale = 0;
+ if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h)
dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
@@ -290,12 +298,12 @@ snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
I915_WRITE(DVSSCALE(pipe), dvsscale);
I915_WRITE(DVSCNTR(pipe), dvscntr);
- I915_WRITE(DVSSURF(pipe), obj->gtt_offset);
+ I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset);
POSTING_READ(DVSSURF(pipe));
}
static void
-snb_disable_plane(struct drm_plane *plane)
+ilk_disable_plane(struct drm_plane *plane)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -306,7 +314,7 @@ snb_disable_plane(struct drm_plane *plane)
/* Disable the scaler */
I915_WRITE(DVSSCALE(pipe), 0);
/* Flush double buffered register updates */
- I915_WRITE(DVSSURF(pipe), 0);
+ I915_MODIFY_DISPBASE(DVSSURF(pipe), 0);
POSTING_READ(DVSSURF(pipe));
}
@@ -333,7 +341,7 @@ intel_disable_primary(struct drm_crtc *crtc)
}
static int
-snb_update_colorkey(struct drm_plane *plane,
+ilk_update_colorkey(struct drm_plane *plane,
struct drm_intel_sprite_colorkey *key)
{
struct drm_device *dev = plane->dev;
@@ -362,7 +370,7 @@ snb_update_colorkey(struct drm_plane *plane,
}
static void
-snb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
+ilk_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -550,14 +558,13 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_intel_sprite_colorkey *set = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mode_object *obj;
struct drm_plane *plane;
struct intel_plane *intel_plane;
int ret = 0;
- if (!dev_priv)
- return -EINVAL;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
/* Make sure we don't try to enable both src & dest simultaneously */
if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
@@ -584,14 +591,13 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_intel_sprite_colorkey *get = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mode_object *obj;
struct drm_plane *plane;
struct intel_plane *intel_plane;
int ret = 0;
- if (!dev_priv)
- return -EINVAL;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
mutex_lock(&dev->mode_config.mutex);
@@ -616,6 +622,14 @@ static const struct drm_plane_funcs intel_plane_funcs = {
.destroy = intel_destroy_plane,
};
+static uint32_t ilk_plane_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+};
+
static uint32_t snb_plane_formats[] = {
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
@@ -630,34 +644,56 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
{
struct intel_plane *intel_plane;
unsigned long possible_crtcs;
+ const uint32_t *plane_formats;
+ int num_plane_formats;
int ret;
- if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ if (INTEL_INFO(dev)->gen < 5)
return -ENODEV;
intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL);
if (!intel_plane)
return -ENOMEM;
- if (IS_GEN6(dev)) {
+ switch (INTEL_INFO(dev)->gen) {
+ case 5:
+ case 6:
intel_plane->max_downscale = 16;
- intel_plane->update_plane = snb_update_plane;
- intel_plane->disable_plane = snb_disable_plane;
- intel_plane->update_colorkey = snb_update_colorkey;
- intel_plane->get_colorkey = snb_get_colorkey;
- } else if (IS_GEN7(dev)) {
+ intel_plane->update_plane = ilk_update_plane;
+ intel_plane->disable_plane = ilk_disable_plane;
+ intel_plane->update_colorkey = ilk_update_colorkey;
+ intel_plane->get_colorkey = ilk_get_colorkey;
+
+ if (IS_GEN6(dev)) {
+ plane_formats = snb_plane_formats;
+ num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+ } else {
+ plane_formats = ilk_plane_formats;
+ num_plane_formats = ARRAY_SIZE(ilk_plane_formats);
+ }
+ break;
+
+ case 7:
intel_plane->max_downscale = 2;
intel_plane->update_plane = ivb_update_plane;
intel_plane->disable_plane = ivb_disable_plane;
intel_plane->update_colorkey = ivb_update_colorkey;
intel_plane->get_colorkey = ivb_get_colorkey;
+
+ plane_formats = snb_plane_formats;
+ num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+ break;
+
+ default:
+ return -ENODEV;
}
intel_plane->pipe = pipe;
possible_crtcs = (1 << pipe);
ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs,
- &intel_plane_funcs, snb_plane_formats,
- ARRAY_SIZE(snb_plane_formats), false);
+ &intel_plane_funcs,
+ plane_formats, num_plane_formats,
+ false);
if (ret)
kfree(intel_plane);
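
Note: intel_plane_init() now switches on the display generation to pick both the plane hooks and the supported format list, extending sprite support back to gen5/ILK. A self-contained sketch of the format-table selection — the numeric format values are placeholders for the DRM_FORMAT_* fourccs:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const unsigned int ilk_formats[] = { 1, 2, 3, 4, 5 };    /* XRGB + 4 YUV */
    static const unsigned int snb_formats[] = { 0, 1, 2, 3, 4, 5 }; /* adds XBGR */

    static int pick_formats(int gen, const unsigned int **fmts, int *n)
    {
        switch (gen) {
        case 5:
        case 6:
            *fmts = (gen == 6) ? snb_formats : ilk_formats;
            *n = (gen == 6) ? (int)ARRAY_SIZE(snb_formats)
                            : (int)ARRAY_SIZE(ilk_formats);
            return 0;
        case 7:
            *fmts = snb_formats;
            *n = (int)ARRAY_SIZE(snb_formats);
            return 0;
        default:
            return -1;      /* no sprite planes before ilk */
        }
    }

    int main(void)
    {
        const unsigned int *f;
        int n;
        if (pick_formats(6, &f, &n) == 0)
            printf("%d formats\n", n);
        return 0;
    }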
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 05f765ef546..3346612d295 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -811,7 +811,7 @@ intel_tv_mode_lookup(const char *tv_format)
{
int i;
- for (i = 0; i < sizeof(tv_modes) / sizeof(tv_modes[0]); i++) {
+ for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
const struct tv_mode *tv_mode = &tv_modes[i];
if (!strcmp(tv_format, tv_mode->name))
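
Note: the open-coded sizeof division becomes ARRAY_SIZE, the kernel's standard idiom for element counts. A minimal illustration — the kernel's version in include/linux/kernel.h also adds an array-type check that this sketch omits:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    int main(void)
    {
        int tv_modes[12];
        /* Writing sizeof(x)/sizeof(x[0]) out by hand is easy to get
         * subtly wrong when the element type changes; the macro is not. */
        printf("%zu modes\n", ARRAY_SIZE(tv_modes));
        return 0;
    }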
@@ -1153,6 +1153,15 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
DAC_B_0_7_V |
DAC_C_0_7_V);
+
+ /*
+ * The TV sense state should be cleared to zero on the Cantiga platform;
+ * otherwise the TV is misdetected. This is a hardware requirement.
+ */
+ if (IS_GM45(dev))
+ tv_dac &= ~(TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL |
+ TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL);
+
I915_WRITE(TV_CTL, tv_ctl);
I915_WRITE(TV_DAC, tv_dac);
POSTING_READ(TV_DAC);
@@ -1240,11 +1249,8 @@ intel_tv_detect(struct drm_connector *connector, bool force)
int type;
mode = reported_modes[0];
- drm_mode_set_crtcinfo(&mode, 0);
- if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
- type = intel_tv_detect_type(intel_tv, connector);
- } else if (force) {
+ if (force) {
struct intel_load_detect_pipe tmp;
if (intel_get_load_detect_pipe(&intel_tv->base, connector,